Bluetooth: Fix error status when pairing fails
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device state event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
23bb5763 60void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 61{
f0e09510 62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
23bb5763 63
a5040efa
JH
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
66 */
75fb0e32
JH
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1036b890 69 u16 opcode = __le16_to_cpu(sent->opcode);
75fb0e32
JH
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
1036b890 79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
75fb0e32
JH
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
23bb5763 88 return;
75fb0e32 89 }
1da177e4
LT
90
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
a8c5fb1a
GP
110static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
1da177e4
LT
113{
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125 schedule_timeout(timeout);
126
127 remove_wait_queue(&hdev->req_wait_q, &wait);
128
129 if (signal_pending(current))
130 return -EINTR;
131
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
e175072f 134 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
135 break;
136
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
140
141 default:
142 err = -ETIMEDOUT;
143 break;
3ff50b79 144 }
1da177e4 145
a5040efa 146 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
147
148 BT_DBG("%s end: err %d", hdev->name, err);
149
150 return err;
151}
152
6039aa73
GP
153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
1da177e4
LT
156{
157 int ret;
158
7c6a329e
MH
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
1da177e4
LT
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
170static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171{
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
f630cf0d 175 set_bit(HCI_RESET, &hdev->flags);
a9de9248 176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
177}
178
e61ef499 179static void bredr_init(struct hci_dev *hdev)
1da177e4 180{
b0916ea0 181 struct hci_cp_delete_stored_link_key cp;
1ebb9252 182 __le16 param;
89f2783d 183 __u8 flt_type;
1da177e4 184
2455a3ea
AE
185 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
186
1da177e4
LT
187 /* Mandatory initialization */
188
1da177e4 189 /* Read Local Supported Features */
a9de9248 190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 191
1143e5a6 192 /* Read Local Version */
a9de9248 193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 194
1da177e4 195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 196 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 197
1da177e4 198 /* Read BD Address */
a9de9248
MH
199 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
200
201 /* Read Class of Device */
202 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
203
204 /* Read Local Name */
205 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
206
207 /* Read Voice Setting */
a9de9248 208 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
209
210 /* Optional initialization */
211
212 /* Clear Event Filters */
89f2783d 213 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 215
1da177e4 216 /* Connection accept timeout ~20 secs */
82781e63 217 param = __constant_cpu_to_le16(0x7d00);
a9de9248 218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
219
220 bacpy(&cp.bdaddr, BDADDR_ANY);
221 cp.delete_all = 1;
222 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
223}
224
e61ef499
AE
225static void amp_init(struct hci_dev *hdev)
226{
2455a3ea
AE
227 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
228
e61ef499
AE
229 /* Read Local Version */
230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
231
232 /* Read Local AMP Info */
233 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
234
235 /* Read Data Blk size */
236 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
237}
238
239static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
240{
241 struct sk_buff *skb;
242
243 BT_DBG("%s %ld", hdev->name, opt);
244
245 /* Driver initialization */
246
247 /* Special commands */
248 while ((skb = skb_dequeue(&hdev->driver_init))) {
249 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
250 skb->dev = (void *) hdev;
251
252 skb_queue_tail(&hdev->cmd_q, skb);
253 queue_work(hdev->workqueue, &hdev->cmd_work);
254 }
255 skb_queue_purge(&hdev->driver_init);
256
11778716
AE
257 /* Reset */
258 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
259 hci_reset_req(hdev, 0);
260
e61ef499
AE
261 switch (hdev->dev_type) {
262 case HCI_BREDR:
263 bredr_init(hdev);
264 break;
265
266 case HCI_AMP:
267 amp_init(hdev);
268 break;
269
270 default:
271 BT_ERR("Unknown device type %d", hdev->dev_type);
272 break;
273 }
e61ef499
AE
274}
275
6ed58ec5
VT
276static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
277{
278 BT_DBG("%s", hdev->name);
279
280 /* Read LE buffer size */
281 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
282}
283
1da177e4
LT
284static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
285{
286 __u8 scan = opt;
287
288 BT_DBG("%s %x", hdev->name, scan);
289
290 /* Inquiry and Page scans */
a9de9248 291 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
292}
293
294static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
295{
296 __u8 auth = opt;
297
298 BT_DBG("%s %x", hdev->name, auth);
299
300 /* Authentication */
a9de9248 301 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
302}
303
304static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
305{
306 __u8 encrypt = opt;
307
308 BT_DBG("%s %x", hdev->name, encrypt);
309
e4e8e37c 310 /* Encryption */
a9de9248 311 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
312}
313
e4e8e37c
MH
314static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
315{
316 __le16 policy = cpu_to_le16(opt);
317
a418b893 318 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
319
320 /* Default link policy */
321 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
322}
323
8e87d142 324/* Get HCI device by index.
1da177e4
LT
325 * Device is held on return. */
326struct hci_dev *hci_dev_get(int index)
327{
8035ded4 328 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
329
330 BT_DBG("%d", index);
331
332 if (index < 0)
333 return NULL;
334
335 read_lock(&hci_dev_list_lock);
8035ded4 336 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
337 if (d->id == index) {
338 hdev = hci_dev_hold(d);
339 break;
340 }
341 }
342 read_unlock(&hci_dev_list_lock);
343 return hdev;
344}
1da177e4
LT
345
346/* ---- Inquiry support ---- */
ff9ef578 347
30dc78e1
JH
348bool hci_discovery_active(struct hci_dev *hdev)
349{
350 struct discovery_state *discov = &hdev->discovery;
351
6fbe195d 352 switch (discov->state) {
343f935b 353 case DISCOVERY_FINDING:
6fbe195d 354 case DISCOVERY_RESOLVING:
30dc78e1
JH
355 return true;
356
6fbe195d
AG
357 default:
358 return false;
359 }
30dc78e1
JH
360}
361
ff9ef578
JH
362void hci_discovery_set_state(struct hci_dev *hdev, int state)
363{
364 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
365
366 if (hdev->discovery.state == state)
367 return;
368
369 switch (state) {
370 case DISCOVERY_STOPPED:
7b99b659
AG
371 if (hdev->discovery.state != DISCOVERY_STARTING)
372 mgmt_discovering(hdev, 0);
ff9ef578
JH
373 break;
374 case DISCOVERY_STARTING:
375 break;
343f935b 376 case DISCOVERY_FINDING:
ff9ef578
JH
377 mgmt_discovering(hdev, 1);
378 break;
30dc78e1
JH
379 case DISCOVERY_RESOLVING:
380 break;
ff9ef578
JH
381 case DISCOVERY_STOPPING:
382 break;
383 }
384
385 hdev->discovery.state = state;
386}
387
1da177e4
LT
388static void inquiry_cache_flush(struct hci_dev *hdev)
389{
30883512 390 struct discovery_state *cache = &hdev->discovery;
b57c1a56 391 struct inquiry_entry *p, *n;
1da177e4 392
561aafbc
JH
393 list_for_each_entry_safe(p, n, &cache->all, all) {
394 list_del(&p->all);
b57c1a56 395 kfree(p);
1da177e4 396 }
561aafbc
JH
397
398 INIT_LIST_HEAD(&cache->unknown);
399 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
400}
401
a8c5fb1a
GP
402struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
403 bdaddr_t *bdaddr)
1da177e4 404{
30883512 405 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
406 struct inquiry_entry *e;
407
408 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
409
561aafbc
JH
410 list_for_each_entry(e, &cache->all, all) {
411 if (!bacmp(&e->data.bdaddr, bdaddr))
412 return e;
413 }
414
415 return NULL;
416}
417
418struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 419 bdaddr_t *bdaddr)
561aafbc 420{
30883512 421 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
422 struct inquiry_entry *e;
423
424 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
425
426 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 427 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
428 return e;
429 }
430
431 return NULL;
1da177e4
LT
432}
433
30dc78e1 434struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
435 bdaddr_t *bdaddr,
436 int state)
30dc78e1
JH
437{
438 struct discovery_state *cache = &hdev->discovery;
439 struct inquiry_entry *e;
440
441 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
442
443 list_for_each_entry(e, &cache->resolve, list) {
444 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
445 return e;
446 if (!bacmp(&e->data.bdaddr, bdaddr))
447 return e;
448 }
449
450 return NULL;
451}
452
a3d4e20a 453void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 454 struct inquiry_entry *ie)
a3d4e20a
JH
455{
456 struct discovery_state *cache = &hdev->discovery;
457 struct list_head *pos = &cache->resolve;
458 struct inquiry_entry *p;
459
460 list_del(&ie->list);
461
462 list_for_each_entry(p, &cache->resolve, list) {
463 if (p->name_state != NAME_PENDING &&
a8c5fb1a 464 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
465 break;
466 pos = &p->list;
467 }
468
469 list_add(&ie->list, pos);
470}
471
3175405b 472bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 473 bool name_known, bool *ssp)
1da177e4 474{
30883512 475 struct discovery_state *cache = &hdev->discovery;
70f23020 476 struct inquiry_entry *ie;
1da177e4
LT
477
478 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
479
388fc8fa
JH
480 if (ssp)
481 *ssp = data->ssp_mode;
482
70f23020 483 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 484 if (ie) {
388fc8fa
JH
485 if (ie->data.ssp_mode && ssp)
486 *ssp = true;
487
a3d4e20a 488 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 489 data->rssi != ie->data.rssi) {
a3d4e20a
JH
490 ie->data.rssi = data->rssi;
491 hci_inquiry_cache_update_resolve(hdev, ie);
492 }
493
561aafbc 494 goto update;
a3d4e20a 495 }
561aafbc
JH
496
497 /* Entry not in the cache. Add new one. */
498 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
499 if (!ie)
3175405b 500 return false;
561aafbc
JH
501
502 list_add(&ie->all, &cache->all);
503
504 if (name_known) {
505 ie->name_state = NAME_KNOWN;
506 } else {
507 ie->name_state = NAME_NOT_KNOWN;
508 list_add(&ie->list, &cache->unknown);
509 }
70f23020 510
561aafbc
JH
511update:
512 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 513 ie->name_state != NAME_PENDING) {
561aafbc
JH
514 ie->name_state = NAME_KNOWN;
515 list_del(&ie->list);
1da177e4
LT
516 }
517
70f23020
AE
518 memcpy(&ie->data, data, sizeof(*data));
519 ie->timestamp = jiffies;
1da177e4 520 cache->timestamp = jiffies;
3175405b
JH
521
522 if (ie->name_state == NAME_NOT_KNOWN)
523 return false;
524
525 return true;
1da177e4
LT
526}
527
528static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
529{
30883512 530 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
531 struct inquiry_info *info = (struct inquiry_info *) buf;
532 struct inquiry_entry *e;
533 int copied = 0;
534
561aafbc 535 list_for_each_entry(e, &cache->all, all) {
1da177e4 536 struct inquiry_data *data = &e->data;
b57c1a56
JH
537
538 if (copied >= num)
539 break;
540
1da177e4
LT
541 bacpy(&info->bdaddr, &data->bdaddr);
542 info->pscan_rep_mode = data->pscan_rep_mode;
543 info->pscan_period_mode = data->pscan_period_mode;
544 info->pscan_mode = data->pscan_mode;
545 memcpy(info->dev_class, data->dev_class, 3);
546 info->clock_offset = data->clock_offset;
b57c1a56 547
1da177e4 548 info++;
b57c1a56 549 copied++;
1da177e4
LT
550 }
551
552 BT_DBG("cache %p, copied %d", cache, copied);
553 return copied;
554}
555
556static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
557{
558 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
559 struct hci_cp_inquiry cp;
560
561 BT_DBG("%s", hdev->name);
562
563 if (test_bit(HCI_INQUIRY, &hdev->flags))
564 return;
565
566 /* Start Inquiry */
567 memcpy(&cp.lap, &ir->lap, 3);
568 cp.length = ir->length;
569 cp.num_rsp = ir->num_rsp;
a9de9248 570 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
571}
572
573int hci_inquiry(void __user *arg)
574{
575 __u8 __user *ptr = arg;
576 struct hci_inquiry_req ir;
577 struct hci_dev *hdev;
578 int err = 0, do_inquiry = 0, max_rsp;
579 long timeo;
580 __u8 *buf;
581
582 if (copy_from_user(&ir, ptr, sizeof(ir)))
583 return -EFAULT;
584
5a08ecce
AE
585 hdev = hci_dev_get(ir.dev_id);
586 if (!hdev)
1da177e4
LT
587 return -ENODEV;
588
09fd0de5 589 hci_dev_lock(hdev);
8e87d142 590 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 591 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
592 inquiry_cache_flush(hdev);
593 do_inquiry = 1;
594 }
09fd0de5 595 hci_dev_unlock(hdev);
1da177e4 596
04837f64 597 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
598
599 if (do_inquiry) {
600 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
601 if (err < 0)
602 goto done;
603 }
1da177e4 604
8fc9ced3
GP
605 /* for unlimited number of responses we will use buffer with
606 * 255 entries
607 */
1da177e4
LT
608 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
609
610 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
611 * copy it to the user space.
612 */
01df8c31 613 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 614 if (!buf) {
1da177e4
LT
615 err = -ENOMEM;
616 goto done;
617 }
618
09fd0de5 619 hci_dev_lock(hdev);
1da177e4 620 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 621 hci_dev_unlock(hdev);
1da177e4
LT
622
623 BT_DBG("num_rsp %d", ir.num_rsp);
624
625 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
626 ptr += sizeof(ir);
627 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 628 ir.num_rsp))
1da177e4 629 err = -EFAULT;
8e87d142 630 } else
1da177e4
LT
631 err = -EFAULT;
632
633 kfree(buf);
634
635done:
636 hci_dev_put(hdev);
637 return err;
638}
639
640/* ---- HCI ioctl helpers ---- */
641
642int hci_dev_open(__u16 dev)
643{
644 struct hci_dev *hdev;
645 int ret = 0;
646
5a08ecce
AE
647 hdev = hci_dev_get(dev);
648 if (!hdev)
1da177e4
LT
649 return -ENODEV;
650
651 BT_DBG("%s %p", hdev->name, hdev);
652
653 hci_req_lock(hdev);
654
94324962
JH
655 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
656 ret = -ENODEV;
657 goto done;
658 }
659
611b30f7
MH
660 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
661 ret = -ERFKILL;
662 goto done;
663 }
664
1da177e4
LT
665 if (test_bit(HCI_UP, &hdev->flags)) {
666 ret = -EALREADY;
667 goto done;
668 }
669
670 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
671 set_bit(HCI_RAW, &hdev->flags);
672
07e3b94a
AE
673 /* Treat all non BR/EDR controllers as raw devices if
674 enable_hs is not set */
675 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
676 set_bit(HCI_RAW, &hdev->flags);
677
1da177e4
LT
678 if (hdev->open(hdev)) {
679 ret = -EIO;
680 goto done;
681 }
682
683 if (!test_bit(HCI_RAW, &hdev->flags)) {
684 atomic_set(&hdev->cmd_cnt, 1);
685 set_bit(HCI_INIT, &hdev->flags);
a5040efa 686 hdev->init_last_cmd = 0;
1da177e4 687
5f246e89 688 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
1da177e4 689
eead27da 690 if (lmp_host_le_capable(hdev))
6ed58ec5 691 ret = __hci_request(hdev, hci_le_init_req, 0,
5f246e89 692 HCI_INIT_TIMEOUT);
6ed58ec5 693
1da177e4
LT
694 clear_bit(HCI_INIT, &hdev->flags);
695 }
696
697 if (!ret) {
698 hci_dev_hold(hdev);
699 set_bit(HCI_UP, &hdev->flags);
700 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a
AE
701 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
702 mgmt_valid_hdev(hdev)) {
09fd0de5 703 hci_dev_lock(hdev);
744cf19e 704 mgmt_powered(hdev, 1);
09fd0de5 705 hci_dev_unlock(hdev);
56e5cb86 706 }
8e87d142 707 } else {
1da177e4 708 /* Init failed, cleanup */
3eff45ea 709 flush_work(&hdev->tx_work);
c347b765 710 flush_work(&hdev->cmd_work);
b78752cc 711 flush_work(&hdev->rx_work);
1da177e4
LT
712
713 skb_queue_purge(&hdev->cmd_q);
714 skb_queue_purge(&hdev->rx_q);
715
716 if (hdev->flush)
717 hdev->flush(hdev);
718
719 if (hdev->sent_cmd) {
720 kfree_skb(hdev->sent_cmd);
721 hdev->sent_cmd = NULL;
722 }
723
724 hdev->close(hdev);
725 hdev->flags = 0;
726 }
727
728done:
729 hci_req_unlock(hdev);
730 hci_dev_put(hdev);
731 return ret;
732}
733
734static int hci_dev_do_close(struct hci_dev *hdev)
735{
736 BT_DBG("%s %p", hdev->name, hdev);
737
28b75a89
AG
738 cancel_work_sync(&hdev->le_scan);
739
78c04c0b
VCG
740 cancel_delayed_work(&hdev->power_off);
741
1da177e4
LT
742 hci_req_cancel(hdev, ENODEV);
743 hci_req_lock(hdev);
744
745 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 746 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
747 hci_req_unlock(hdev);
748 return 0;
749 }
750
3eff45ea
GP
751 /* Flush RX and TX works */
752 flush_work(&hdev->tx_work);
b78752cc 753 flush_work(&hdev->rx_work);
1da177e4 754
16ab91ab 755 if (hdev->discov_timeout > 0) {
e0f9309f 756 cancel_delayed_work(&hdev->discov_off);
16ab91ab 757 hdev->discov_timeout = 0;
5e5282bb 758 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
759 }
760
a8b2d5c2 761 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
762 cancel_delayed_work(&hdev->service_cache);
763
7ba8b4be
AG
764 cancel_delayed_work_sync(&hdev->le_scan_disable);
765
09fd0de5 766 hci_dev_lock(hdev);
1da177e4
LT
767 inquiry_cache_flush(hdev);
768 hci_conn_hash_flush(hdev);
09fd0de5 769 hci_dev_unlock(hdev);
1da177e4
LT
770
771 hci_notify(hdev, HCI_DEV_DOWN);
772
773 if (hdev->flush)
774 hdev->flush(hdev);
775
776 /* Reset device */
777 skb_queue_purge(&hdev->cmd_q);
778 atomic_set(&hdev->cmd_cnt, 1);
8af59467 779 if (!test_bit(HCI_RAW, &hdev->flags) &&
a6c511c6 780 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 781 set_bit(HCI_INIT, &hdev->flags);
5f246e89 782 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
783 clear_bit(HCI_INIT, &hdev->flags);
784 }
785
c347b765
GP
786 /* flush cmd work */
787 flush_work(&hdev->cmd_work);
1da177e4
LT
788
789 /* Drop queues */
790 skb_queue_purge(&hdev->rx_q);
791 skb_queue_purge(&hdev->cmd_q);
792 skb_queue_purge(&hdev->raw_q);
793
794 /* Drop last sent command */
795 if (hdev->sent_cmd) {
b79f44c1 796 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
797 kfree_skb(hdev->sent_cmd);
798 hdev->sent_cmd = NULL;
799 }
800
801 /* After this point our queues are empty
802 * and no tasks are scheduled. */
803 hdev->close(hdev);
804
bb4b2a9a
AE
805 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
806 mgmt_valid_hdev(hdev)) {
8ee56540
MH
807 hci_dev_lock(hdev);
808 mgmt_powered(hdev, 0);
809 hci_dev_unlock(hdev);
810 }
5add6af8 811
1da177e4
LT
812 /* Clear flags */
813 hdev->flags = 0;
814
e59fda8d 815 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 816 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 817
1da177e4
LT
818 hci_req_unlock(hdev);
819
820 hci_dev_put(hdev);
821 return 0;
822}
823
824int hci_dev_close(__u16 dev)
825{
826 struct hci_dev *hdev;
827 int err;
828
70f23020
AE
829 hdev = hci_dev_get(dev);
830 if (!hdev)
1da177e4 831 return -ENODEV;
8ee56540
MH
832
833 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
834 cancel_delayed_work(&hdev->power_off);
835
1da177e4 836 err = hci_dev_do_close(hdev);
8ee56540 837
1da177e4
LT
838 hci_dev_put(hdev);
839 return err;
840}
841
842int hci_dev_reset(__u16 dev)
843{
844 struct hci_dev *hdev;
845 int ret = 0;
846
70f23020
AE
847 hdev = hci_dev_get(dev);
848 if (!hdev)
1da177e4
LT
849 return -ENODEV;
850
851 hci_req_lock(hdev);
1da177e4
LT
852
853 if (!test_bit(HCI_UP, &hdev->flags))
854 goto done;
855
856 /* Drop queues */
857 skb_queue_purge(&hdev->rx_q);
858 skb_queue_purge(&hdev->cmd_q);
859
09fd0de5 860 hci_dev_lock(hdev);
1da177e4
LT
861 inquiry_cache_flush(hdev);
862 hci_conn_hash_flush(hdev);
09fd0de5 863 hci_dev_unlock(hdev);
1da177e4
LT
864
865 if (hdev->flush)
866 hdev->flush(hdev);
867
8e87d142 868 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 869 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
870
871 if (!test_bit(HCI_RAW, &hdev->flags))
5f246e89 872 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
873
874done:
1da177e4
LT
875 hci_req_unlock(hdev);
876 hci_dev_put(hdev);
877 return ret;
878}
879
880int hci_dev_reset_stat(__u16 dev)
881{
882 struct hci_dev *hdev;
883 int ret = 0;
884
70f23020
AE
885 hdev = hci_dev_get(dev);
886 if (!hdev)
1da177e4
LT
887 return -ENODEV;
888
889 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
890
891 hci_dev_put(hdev);
892
893 return ret;
894}
895
896int hci_dev_cmd(unsigned int cmd, void __user *arg)
897{
898 struct hci_dev *hdev;
899 struct hci_dev_req dr;
900 int err = 0;
901
902 if (copy_from_user(&dr, arg, sizeof(dr)))
903 return -EFAULT;
904
70f23020
AE
905 hdev = hci_dev_get(dr.dev_id);
906 if (!hdev)
1da177e4
LT
907 return -ENODEV;
908
909 switch (cmd) {
910 case HCISETAUTH:
04837f64 911 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
5f246e89 912 HCI_INIT_TIMEOUT);
1da177e4
LT
913 break;
914
915 case HCISETENCRYPT:
916 if (!lmp_encrypt_capable(hdev)) {
917 err = -EOPNOTSUPP;
918 break;
919 }
920
921 if (!test_bit(HCI_AUTH, &hdev->flags)) {
922 /* Auth must be enabled first */
04837f64 923 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
5f246e89 924 HCI_INIT_TIMEOUT);
1da177e4
LT
925 if (err)
926 break;
927 }
928
04837f64 929 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
5f246e89 930 HCI_INIT_TIMEOUT);
1da177e4
LT
931 break;
932
933 case HCISETSCAN:
04837f64 934 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
5f246e89 935 HCI_INIT_TIMEOUT);
1da177e4
LT
936 break;
937
1da177e4 938 case HCISETLINKPOL:
e4e8e37c 939 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
5f246e89 940 HCI_INIT_TIMEOUT);
1da177e4
LT
941 break;
942
943 case HCISETLINKMODE:
e4e8e37c
MH
944 hdev->link_mode = ((__u16) dr.dev_opt) &
945 (HCI_LM_MASTER | HCI_LM_ACCEPT);
946 break;
947
948 case HCISETPTYPE:
949 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
950 break;
951
952 case HCISETACLMTU:
e4e8e37c
MH
953 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
954 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
955 break;
956
957 case HCISETSCOMTU:
e4e8e37c
MH
958 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
959 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
960 break;
961
962 default:
963 err = -EINVAL;
964 break;
965 }
e4e8e37c 966
1da177e4
LT
967 hci_dev_put(hdev);
968 return err;
969}
970
971int hci_get_dev_list(void __user *arg)
972{
8035ded4 973 struct hci_dev *hdev;
1da177e4
LT
974 struct hci_dev_list_req *dl;
975 struct hci_dev_req *dr;
1da177e4
LT
976 int n = 0, size, err;
977 __u16 dev_num;
978
979 if (get_user(dev_num, (__u16 __user *) arg))
980 return -EFAULT;
981
982 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
983 return -EINVAL;
984
985 size = sizeof(*dl) + dev_num * sizeof(*dr);
986
70f23020
AE
987 dl = kzalloc(size, GFP_KERNEL);
988 if (!dl)
1da177e4
LT
989 return -ENOMEM;
990
991 dr = dl->dev_req;
992
f20d09d5 993 read_lock(&hci_dev_list_lock);
8035ded4 994 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 995 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 996 cancel_delayed_work(&hdev->power_off);
c542a06c 997
a8b2d5c2
JH
998 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
999 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1000
1da177e4
LT
1001 (dr + n)->dev_id = hdev->id;
1002 (dr + n)->dev_opt = hdev->flags;
c542a06c 1003
1da177e4
LT
1004 if (++n >= dev_num)
1005 break;
1006 }
f20d09d5 1007 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1008
1009 dl->dev_num = n;
1010 size = sizeof(*dl) + n * sizeof(*dr);
1011
1012 err = copy_to_user(arg, dl, size);
1013 kfree(dl);
1014
1015 return err ? -EFAULT : 0;
1016}
1017
1018int hci_get_dev_info(void __user *arg)
1019{
1020 struct hci_dev *hdev;
1021 struct hci_dev_info di;
1022 int err = 0;
1023
1024 if (copy_from_user(&di, arg, sizeof(di)))
1025 return -EFAULT;
1026
70f23020
AE
1027 hdev = hci_dev_get(di.dev_id);
1028 if (!hdev)
1da177e4
LT
1029 return -ENODEV;
1030
a8b2d5c2 1031 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1032 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1033
a8b2d5c2
JH
1034 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1035 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1036
1da177e4
LT
1037 strcpy(di.name, hdev->name);
1038 di.bdaddr = hdev->bdaddr;
943da25d 1039 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1040 di.flags = hdev->flags;
1041 di.pkt_type = hdev->pkt_type;
1042 di.acl_mtu = hdev->acl_mtu;
1043 di.acl_pkts = hdev->acl_pkts;
1044 di.sco_mtu = hdev->sco_mtu;
1045 di.sco_pkts = hdev->sco_pkts;
1046 di.link_policy = hdev->link_policy;
1047 di.link_mode = hdev->link_mode;
1048
1049 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1050 memcpy(&di.features, &hdev->features, sizeof(di.features));
1051
1052 if (copy_to_user(arg, &di, sizeof(di)))
1053 err = -EFAULT;
1054
1055 hci_dev_put(hdev);
1056
1057 return err;
1058}
1059
1060/* ---- Interface to HCI drivers ---- */
1061
611b30f7
MH
1062static int hci_rfkill_set_block(void *data, bool blocked)
1063{
1064 struct hci_dev *hdev = data;
1065
1066 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1067
1068 if (!blocked)
1069 return 0;
1070
1071 hci_dev_do_close(hdev);
1072
1073 return 0;
1074}
1075
1076static const struct rfkill_ops hci_rfkill_ops = {
1077 .set_block = hci_rfkill_set_block,
1078};
1079
ab81cbf9
JH
1080static void hci_power_on(struct work_struct *work)
1081{
1082 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1083
1084 BT_DBG("%s", hdev->name);
1085
1086 if (hci_dev_open(hdev->id) < 0)
1087 return;
1088
a8b2d5c2 1089 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
9345d40c 1090 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1091
a8b2d5c2 1092 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1093 mgmt_index_added(hdev);
ab81cbf9
JH
1094}
1095
1096static void hci_power_off(struct work_struct *work)
1097{
3243553f 1098 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1099 power_off.work);
ab81cbf9
JH
1100
1101 BT_DBG("%s", hdev->name);
1102
8ee56540 1103 hci_dev_do_close(hdev);
ab81cbf9
JH
1104}
1105
16ab91ab
JH
1106static void hci_discov_off(struct work_struct *work)
1107{
1108 struct hci_dev *hdev;
1109 u8 scan = SCAN_PAGE;
1110
1111 hdev = container_of(work, struct hci_dev, discov_off.work);
1112
1113 BT_DBG("%s", hdev->name);
1114
09fd0de5 1115 hci_dev_lock(hdev);
16ab91ab
JH
1116
1117 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1118
1119 hdev->discov_timeout = 0;
1120
09fd0de5 1121 hci_dev_unlock(hdev);
16ab91ab
JH
1122}
1123
2aeb9a1a
JH
1124int hci_uuids_clear(struct hci_dev *hdev)
1125{
1126 struct list_head *p, *n;
1127
1128 list_for_each_safe(p, n, &hdev->uuids) {
1129 struct bt_uuid *uuid;
1130
1131 uuid = list_entry(p, struct bt_uuid, list);
1132
1133 list_del(p);
1134 kfree(uuid);
1135 }
1136
1137 return 0;
1138}
1139
55ed8ca1
JH
1140int hci_link_keys_clear(struct hci_dev *hdev)
1141{
1142 struct list_head *p, *n;
1143
1144 list_for_each_safe(p, n, &hdev->link_keys) {
1145 struct link_key *key;
1146
1147 key = list_entry(p, struct link_key, list);
1148
1149 list_del(p);
1150 kfree(key);
1151 }
1152
1153 return 0;
1154}
1155
b899efaf
VCG
/* Free every stored SMP Long Term Key on @hdev. Always returns 0.
 * Safe-iteration variant is required because entries are unlinked
 * while walking the list. */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1167
55ed8ca1
JH
1168struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1169{
8035ded4 1170 struct link_key *k;
55ed8ca1 1171
8035ded4 1172 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1173 if (bacmp(bdaddr, &k->bdaddr) == 0)
1174 return k;
55ed8ca1
JH
1175
1176 return NULL;
1177}
1178
745c0ce3 1179static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1180 u8 key_type, u8 old_key_type)
d25e28ab
JH
1181{
1182 /* Legacy key */
1183 if (key_type < 0x03)
745c0ce3 1184 return true;
d25e28ab
JH
1185
1186 /* Debug keys are insecure so don't store them persistently */
1187 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1188 return false;
d25e28ab
JH
1189
1190 /* Changed combination key and there's no previous one */
1191 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1192 return false;
d25e28ab
JH
1193
1194 /* Security mode 3 case */
1195 if (!conn)
745c0ce3 1196 return true;
d25e28ab
JH
1197
1198 /* Neither local nor remote side had no-bonding as requirement */
1199 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1200 return true;
d25e28ab
JH
1201
1202 /* Local side had dedicated bonding as requirement */
1203 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1204 return true;
d25e28ab
JH
1205
1206 /* Remote side had dedicated bonding as requirement */
1207 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1208 return true;
d25e28ab
JH
1209
1210 /* If none of the above criteria match, then don't store the key
1211 * persistently */
745c0ce3 1212 return false;
d25e28ab
JH
1213}
1214
c9839a11 1215struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1216{
c9839a11 1217 struct smp_ltk *k;
75d262c2 1218
c9839a11
VCG
1219 list_for_each_entry(k, &hdev->long_term_keys, list) {
1220 if (k->ediv != ediv ||
a8c5fb1a 1221 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1222 continue;
1223
c9839a11 1224 return k;
75d262c2
VCG
1225 }
1226
1227 return NULL;
1228}
75d262c2 1229
c9839a11 1230struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1231 u8 addr_type)
75d262c2 1232{
c9839a11 1233 struct smp_ltk *k;
75d262c2 1234
c9839a11
VCG
1235 list_for_each_entry(k, &hdev->long_term_keys, list)
1236 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1237 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1238 return k;
1239
1240 return NULL;
1241}
75d262c2 1242
d25e28ab 1243int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1244 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1245{
1246 struct link_key *key, *old_key;
745c0ce3
VA
1247 u8 old_key_type;
1248 bool persistent;
55ed8ca1
JH
1249
1250 old_key = hci_find_link_key(hdev, bdaddr);
1251 if (old_key) {
1252 old_key_type = old_key->type;
1253 key = old_key;
1254 } else {
12adcf3a 1255 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1256 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1257 if (!key)
1258 return -ENOMEM;
1259 list_add(&key->list, &hdev->link_keys);
1260 }
1261
1262 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1263
d25e28ab
JH
1264 /* Some buggy controller combinations generate a changed
1265 * combination key for legacy pairing even when there's no
1266 * previous key */
1267 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1268 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1269 type = HCI_LK_COMBINATION;
655fe6ec
JH
1270 if (conn)
1271 conn->key_type = type;
1272 }
d25e28ab 1273
55ed8ca1 1274 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1275 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1276 key->pin_len = pin_len;
1277
b6020ba0 1278 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1279 key->type = old_key_type;
4748fed2
JH
1280 else
1281 key->type = type;
1282
4df378a1
JH
1283 if (!new_key)
1284 return 0;
1285
1286 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1287
744cf19e 1288 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1289
6ec5bcad
VA
1290 if (conn)
1291 conn->flush_key = !persistent;
55ed8ca1
JH
1292
1293 return 0;
1294}
1295
c9839a11 1296int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1297 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1298 ediv, u8 rand[8])
75d262c2 1299{
c9839a11 1300 struct smp_ltk *key, *old_key;
75d262c2 1301
c9839a11
VCG
1302 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1303 return 0;
75d262c2 1304
c9839a11
VCG
1305 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1306 if (old_key)
75d262c2 1307 key = old_key;
c9839a11
VCG
1308 else {
1309 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1310 if (!key)
1311 return -ENOMEM;
c9839a11 1312 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1313 }
1314
75d262c2 1315 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1316 key->bdaddr_type = addr_type;
1317 memcpy(key->val, tk, sizeof(key->val));
1318 key->authenticated = authenticated;
1319 key->ediv = ediv;
1320 key->enc_size = enc_size;
1321 key->type = type;
1322 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1323
c9839a11
VCG
1324 if (!new_key)
1325 return 0;
75d262c2 1326
261cc5aa
VCG
1327 if (type & HCI_SMP_LTK)
1328 mgmt_new_ltk(hdev, key, 1);
1329
75d262c2
VCG
1330 return 0;
1331}
1332
55ed8ca1
JH
1333int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1334{
1335 struct link_key *key;
1336
1337 key = hci_find_link_key(hdev, bdaddr);
1338 if (!key)
1339 return -ENOENT;
1340
1341 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1342
1343 list_del(&key->list);
1344 kfree(key);
1345
1346 return 0;
1347}
1348
b899efaf
VCG
1349int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1350{
1351 struct smp_ltk *k, *tmp;
1352
1353 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1354 if (bacmp(bdaddr, &k->bdaddr))
1355 continue;
1356
1357 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1358
1359 list_del(&k->list);
1360 kfree(k);
1361 }
1362
1363 return 0;
1364}
1365
6bd32326 1366/* HCI command timer function */
bda4f23a 1367static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1368{
1369 struct hci_dev *hdev = (void *) arg;
1370
bda4f23a
AE
1371 if (hdev->sent_cmd) {
1372 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1373 u16 opcode = __le16_to_cpu(sent->opcode);
1374
1375 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1376 } else {
1377 BT_ERR("%s command tx timeout", hdev->name);
1378 }
1379
6bd32326 1380 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1381 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1382}
1383
2763eda6 1384struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1385 bdaddr_t *bdaddr)
2763eda6
SJ
1386{
1387 struct oob_data *data;
1388
1389 list_for_each_entry(data, &hdev->remote_oob_data, list)
1390 if (bacmp(bdaddr, &data->bdaddr) == 0)
1391 return data;
1392
1393 return NULL;
1394}
1395
1396int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1397{
1398 struct oob_data *data;
1399
1400 data = hci_find_remote_oob_data(hdev, bdaddr);
1401 if (!data)
1402 return -ENOENT;
1403
1404 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1405
1406 list_del(&data->list);
1407 kfree(data);
1408
1409 return 0;
1410}
1411
/* Free all stored remote OOB data entries on @hdev. Always returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1423
1424int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1425 u8 *randomizer)
2763eda6
SJ
1426{
1427 struct oob_data *data;
1428
1429 data = hci_find_remote_oob_data(hdev, bdaddr);
1430
1431 if (!data) {
1432 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1433 if (!data)
1434 return -ENOMEM;
1435
1436 bacpy(&data->bdaddr, bdaddr);
1437 list_add(&data->list, &hdev->remote_oob_data);
1438 }
1439
1440 memcpy(data->hash, hash, sizeof(data->hash));
1441 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1442
1443 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1444
1445 return 0;
1446}
1447
04124681 1448struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1449{
8035ded4 1450 struct bdaddr_list *b;
b2a66aad 1451
8035ded4 1452 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1453 if (bacmp(bdaddr, &b->bdaddr) == 0)
1454 return b;
b2a66aad
AJ
1455
1456 return NULL;
1457}
1458
1459int hci_blacklist_clear(struct hci_dev *hdev)
1460{
1461 struct list_head *p, *n;
1462
1463 list_for_each_safe(p, n, &hdev->blacklist) {
1464 struct bdaddr_list *b;
1465
1466 b = list_entry(p, struct bdaddr_list, list);
1467
1468 list_del(p);
1469 kfree(b);
1470 }
1471
1472 return 0;
1473}
1474
88c1fe4b 1475int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1476{
1477 struct bdaddr_list *entry;
b2a66aad
AJ
1478
1479 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1480 return -EBADF;
1481
5e762444
AJ
1482 if (hci_blacklist_lookup(hdev, bdaddr))
1483 return -EEXIST;
b2a66aad
AJ
1484
1485 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1486 if (!entry)
1487 return -ENOMEM;
b2a66aad
AJ
1488
1489 bacpy(&entry->bdaddr, bdaddr);
1490
1491 list_add(&entry->list, &hdev->blacklist);
1492
88c1fe4b 1493 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1494}
1495
88c1fe4b 1496int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1497{
1498 struct bdaddr_list *entry;
b2a66aad 1499
1ec918ce 1500 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1501 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1502
1503 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1504 if (!entry)
5e762444 1505 return -ENOENT;
b2a66aad
AJ
1506
1507 list_del(&entry->list);
1508 kfree(entry);
1509
88c1fe4b 1510 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1511}
1512
7ba8b4be
AG
1513static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1514{
1515 struct le_scan_params *param = (struct le_scan_params *) opt;
1516 struct hci_cp_le_set_scan_param cp;
1517
1518 memset(&cp, 0, sizeof(cp));
1519 cp.type = param->type;
1520 cp.interval = cpu_to_le16(param->interval);
1521 cp.window = cpu_to_le16(param->window);
1522
1523 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1524}
1525
1526static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1527{
1528 struct hci_cp_le_set_scan_enable cp;
1529
1530 memset(&cp, 0, sizeof(cp));
1531 cp.enable = 1;
0431a43c 1532 cp.filter_dup = 1;
7ba8b4be
AG
1533
1534 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1535}
1536
1537static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 1538 u16 window, int timeout)
7ba8b4be
AG
1539{
1540 long timeo = msecs_to_jiffies(3000);
1541 struct le_scan_params param;
1542 int err;
1543
1544 BT_DBG("%s", hdev->name);
1545
1546 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1547 return -EINPROGRESS;
1548
1549 param.type = type;
1550 param.interval = interval;
1551 param.window = window;
1552
1553 hci_req_lock(hdev);
1554
1555 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
04124681 1556 timeo);
7ba8b4be
AG
1557 if (!err)
1558 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1559
1560 hci_req_unlock(hdev);
1561
1562 if (err < 0)
1563 return err;
1564
1565 schedule_delayed_work(&hdev->le_scan_disable,
04124681 1566 msecs_to_jiffies(timeout));
7ba8b4be
AG
1567
1568 return 0;
1569}
1570
7dbfac1d
AG
1571int hci_cancel_le_scan(struct hci_dev *hdev)
1572{
1573 BT_DBG("%s", hdev->name);
1574
1575 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1576 return -EALREADY;
1577
1578 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1579 struct hci_cp_le_set_scan_enable cp;
1580
1581 /* Send HCI command to disable LE Scan */
1582 memset(&cp, 0, sizeof(cp));
1583 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1584 }
1585
1586 return 0;
1587}
1588
7ba8b4be
AG
1589static void le_scan_disable_work(struct work_struct *work)
1590{
1591 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1592 le_scan_disable.work);
7ba8b4be
AG
1593 struct hci_cp_le_set_scan_enable cp;
1594
1595 BT_DBG("%s", hdev->name);
1596
1597 memset(&cp, 0, sizeof(cp));
1598
1599 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1600}
1601
28b75a89
AG
1602static void le_scan_work(struct work_struct *work)
1603{
1604 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1605 struct le_scan_params *param = &hdev->le_scan_params;
1606
1607 BT_DBG("%s", hdev->name);
1608
04124681
GP
1609 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1610 param->timeout);
28b75a89
AG
1611}
1612
1613int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 1614 int timeout)
28b75a89
AG
1615{
1616 struct le_scan_params *param = &hdev->le_scan_params;
1617
1618 BT_DBG("%s", hdev->name);
1619
1620 if (work_busy(&hdev->le_scan))
1621 return -EINPROGRESS;
1622
1623 param->type = type;
1624 param->interval = interval;
1625 param->window = window;
1626 param->timeout = timeout;
1627
1628 queue_work(system_long_wq, &hdev->le_scan);
1629
1630 return 0;
1631}
1632
9be0dab7
DH
1633/* Alloc HCI device */
1634struct hci_dev *hci_alloc_dev(void)
1635{
1636 struct hci_dev *hdev;
1637
1638 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1639 if (!hdev)
1640 return NULL;
1641
b1b813d4
DH
1642 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1643 hdev->esco_type = (ESCO_HV1);
1644 hdev->link_mode = (HCI_LM_ACCEPT);
1645 hdev->io_capability = 0x03; /* No Input No Output */
1646
b1b813d4
DH
1647 hdev->sniff_max_interval = 800;
1648 hdev->sniff_min_interval = 80;
1649
1650 mutex_init(&hdev->lock);
1651 mutex_init(&hdev->req_lock);
1652
1653 INIT_LIST_HEAD(&hdev->mgmt_pending);
1654 INIT_LIST_HEAD(&hdev->blacklist);
1655 INIT_LIST_HEAD(&hdev->uuids);
1656 INIT_LIST_HEAD(&hdev->link_keys);
1657 INIT_LIST_HEAD(&hdev->long_term_keys);
1658 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 1659 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
1660
1661 INIT_WORK(&hdev->rx_work, hci_rx_work);
1662 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1663 INIT_WORK(&hdev->tx_work, hci_tx_work);
1664 INIT_WORK(&hdev->power_on, hci_power_on);
1665 INIT_WORK(&hdev->le_scan, le_scan_work);
1666
b1b813d4
DH
1667 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1668 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1669 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1670
9be0dab7 1671 skb_queue_head_init(&hdev->driver_init);
b1b813d4
DH
1672 skb_queue_head_init(&hdev->rx_q);
1673 skb_queue_head_init(&hdev->cmd_q);
1674 skb_queue_head_init(&hdev->raw_q);
1675
1676 init_waitqueue_head(&hdev->req_wait_q);
1677
bda4f23a 1678 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 1679
b1b813d4
DH
1680 hci_init_sysfs(hdev);
1681 discovery_init(hdev);
9be0dab7
DH
1682
1683 return hdev;
1684}
1685EXPORT_SYMBOL(hci_alloc_dev);
1686
1687/* Free HCI device */
1688void hci_free_dev(struct hci_dev *hdev)
1689{
1690 skb_queue_purge(&hdev->driver_init);
1691
1692 /* will free via device release */
1693 put_device(&hdev->dev);
1694}
1695EXPORT_SYMBOL(hci_free_dev);
1696
1da177e4
LT
/* Register HCI device */
/*
 * Allocates an index, links the device into the global list, creates the
 * per-device workqueue and sysfs/rfkill entries, and schedules the
 * deferred power-on. Returns the new device id on success or a negative
 * errno; on error everything done so far is unwound in reverse order.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority workqueue for rx/tx/cmd work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1775
/* Unregister HCI device */
/*
 * Tears down everything hci_register_dev() set up, in reverse:
 * unlink from the global list, close the device, free reassembly
 * buffers, notify mgmt, remove rfkill/sysfs, destroy the workqueue,
 * clear all per-device storage, drop the reference and release the id.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the id: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if setup had completed */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1831
/* Suspend HCI device */
/* Notify registered listeners of suspend; no state is changed here. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1839
/* Resume HCI device */
/* Notify registered listeners of resume; no state is changed here. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1847
76bca880
MH
1848/* Receive frame from HCI drivers */
1849int hci_recv_frame(struct sk_buff *skb)
1850{
1851 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1852 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 1853 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
1854 kfree_skb(skb);
1855 return -ENXIO;
1856 }
1857
1858 /* Incomming skb */
1859 bt_cb(skb)->incoming = 1;
1860
1861 /* Time stamp */
1862 __net_timestamp(skb);
1863
76bca880 1864 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1865 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1866
76bca880
MH
1867 return 0;
1868}
1869EXPORT_SYMBOL(hci_recv_frame);
1870
/*
 * Incrementally reassemble one HCI packet of @type from a driver byte
 * stream into hdev->reassembly[index].
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno (-EILSEQ for a bad type/index, -ENOMEM on allocation failure or
 * when the advertised payload exceeds the buffer). A complete packet is
 * handed to hci_recv_frame() and the slot is reset.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate a buffer large enough
		 * for the worst case of this packet type and expect the
		 * fixed-size header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and sanity-check it against the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1979
ef222013
MH
1980int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1981{
f39a3c06
SS
1982 int rem = 0;
1983
ef222013
MH
1984 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1985 return -EILSEQ;
1986
da5f6c37 1987 while (count) {
1e429f38 1988 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1989 if (rem < 0)
1990 return rem;
ef222013 1991
f39a3c06
SS
1992 data += (count - rem);
1993 count = rem;
f81c6224 1994 }
ef222013 1995
f39a3c06 1996 return rem;
ef222013
MH
1997}
1998EXPORT_SYMBOL(hci_recv_fragment);
1999
99811510
SS
/* Dedicated reassembly slot for untyped byte streams (e.g. UART). */
#define STREAM_REASSEMBLY 0

/*
 * Feed raw stream bytes into the reassembler. The first byte of each
 * new frame carries the packet type indicator; subsequent calls resume
 * the packet already in progress. Returns unconsumed byte count or a
 * negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2034
1da177e4
LT
2035/* ---- Interface to upper protocols ---- */
2036
1da177e4
LT
2037int hci_register_cb(struct hci_cb *cb)
2038{
2039 BT_DBG("%p name %s", cb, cb->name);
2040
f20d09d5 2041 write_lock(&hci_cb_list_lock);
1da177e4 2042 list_add(&cb->list, &hci_cb_list);
f20d09d5 2043 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2044
2045 return 0;
2046}
2047EXPORT_SYMBOL(hci_register_cb);
2048
2049int hci_unregister_cb(struct hci_cb *cb)
2050{
2051 BT_DBG("%p name %s", cb, cb->name);
2052
f20d09d5 2053 write_lock(&hci_cb_list_lock);
1da177e4 2054 list_del(&cb->list);
f20d09d5 2055 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2056
2057 return 0;
2058}
2059EXPORT_SYMBOL(hci_unregister_cb);
2060
2061static int hci_send_frame(struct sk_buff *skb)
2062{
2063 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2064
2065 if (!hdev) {
2066 kfree_skb(skb);
2067 return -ENODEV;
2068 }
2069
0d48d939 2070 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2071
cd82e61c
MH
2072 /* Time stamp */
2073 __net_timestamp(skb);
1da177e4 2074
cd82e61c
MH
2075 /* Send copy to monitor */
2076 hci_send_to_monitor(hdev, skb);
2077
2078 if (atomic_read(&hdev->promisc)) {
2079 /* Send copy to the sockets */
470fe1b5 2080 hci_send_to_sock(hdev, skb);
1da177e4
LT
2081 }
2082
2083 /* Get rid of skb owner, prior to sending to the driver. */
2084 skb_orphan(skb);
2085
2086 return hdev->send(skb);
2087}
2088
2089/* Send HCI command */
a9de9248 2090int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
2091{
2092 int len = HCI_COMMAND_HDR_SIZE + plen;
2093 struct hci_command_hdr *hdr;
2094 struct sk_buff *skb;
2095
f0e09510 2096 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
1da177e4
LT
2097
2098 skb = bt_skb_alloc(len, GFP_ATOMIC);
2099 if (!skb) {
ef222013 2100 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
2101 return -ENOMEM;
2102 }
2103
2104 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2105 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2106 hdr->plen = plen;
2107
2108 if (plen)
2109 memcpy(skb_put(skb, plen), param, plen);
2110
2111 BT_DBG("skb len %d", skb->len);
2112
0d48d939 2113 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2114 skb->dev = (void *) hdev;
c78ae283 2115
a5040efa
JH
2116 if (test_bit(HCI_INIT, &hdev->flags))
2117 hdev->init_last_cmd = opcode;
2118
1da177e4 2119 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2120 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2121
2122 return 0;
2123}
1da177e4
LT
2124
2125/* Get data from the previously sent command */
a9de9248 2126void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2127{
2128 struct hci_command_hdr *hdr;
2129
2130 if (!hdev->sent_cmd)
2131 return NULL;
2132
2133 hdr = (void *) hdev->sent_cmd->data;
2134
a9de9248 2135 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2136 return NULL;
2137
f0e09510 2138 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2139
2140 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2141}
2142
2143/* Send ACL data */
2144static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2145{
2146 struct hci_acl_hdr *hdr;
2147 int len = skb->len;
2148
badff6d0
ACM
2149 skb_push(skb, HCI_ACL_HDR_SIZE);
2150 skb_reset_transport_header(skb);
9c70220b 2151 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2152 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2153 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2154}
2155
/*
 * Add ACL headers to @skb (and each fragment on its frag_list) and put
 * everything on @queue. Fragments are queued atomically under the queue
 * lock so the scheduler never sees a partial packet; continuation
 * fragments carry ACL_CONT instead of ACL_START.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments: replace ACL_START with ACL_CONT */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2202
2203void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2204{
2205 struct hci_conn *conn = chan->conn;
2206 struct hci_dev *hdev = conn->hdev;
2207
f0e09510 2208 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb
LAD
2209
2210 skb->dev = (void *) hdev;
73d80deb
LAD
2211
2212 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2213
3eff45ea 2214 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2215}
1da177e4
LT
2216
2217/* Send SCO data */
0d861d8b 2218void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2219{
2220 struct hci_dev *hdev = conn->hdev;
2221 struct hci_sco_hdr hdr;
2222
2223 BT_DBG("%s len %d", hdev->name, skb->len);
2224
aca3192c 2225 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2226 hdr.dlen = skb->len;
2227
badff6d0
ACM
2228 skb_push(skb, HCI_SCO_HDR_SIZE);
2229 skb_reset_transport_header(skb);
9c70220b 2230 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2231
2232 skb->dev = (void *) hdev;
0d48d939 2233 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2234
1da177e4 2235 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2236 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2237}
1da177e4
LT
2238
2239/* ---- HCI TX task (outgoing data) ---- */
2240
/* HCI Connection scheduler */
/*
 * Pick the connection of @type with pending data that has the fewest
 * in-flight packets (fair scheduling) and compute its quota from the
 * controller's free buffer count divided by the number of active
 * connections. *quote is set to 0 when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Controller buffer credits for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2301
6039aa73 2302static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2303{
2304 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2305 struct hci_conn *c;
1da177e4 2306
bae1f5d9 2307 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2308
bf4c6325
GP
2309 rcu_read_lock();
2310
1da177e4 2311 /* Kill stalled connections */
bf4c6325 2312 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2313 if (c->type == type && c->sent) {
2314 BT_ERR("%s killing stalled connection %s",
a8c5fb1a 2315 hdev->name, batostr(&c->dst));
7490c6c2 2316 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2317 }
2318 }
bf4c6325
GP
2319
2320 rcu_read_unlock();
1da177e4
LT
2321}
2322
/* HCI channel scheduler: pick the channel that should transmit next
 * for the given link @type.
 *
 * Only channels whose head skb carries the highest pending priority
 * compete; among those, the channel whose connection has the fewest
 * unacknowledged packets (lowest conn->sent) wins.  On return *quote
 * is the fair share of the controller's free buffer count this
 * channel may send (at least 1); NULL is returned when no channel has
 * data ready (*quote is then left untouched). */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the contest:
			 * earlier candidates no longer count. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-busy connection wins at equal priority. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the flow-control budget matching the link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* le_mtu == 0 means LE shares the ACL buffer pool. */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share over the competing channels, minimum of one. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2401
/* Anti-starvation pass run after a scheduling round: for every channel
 * of @type that sent nothing this round (chan->sent == 0), promote its
 * head packet's priority to HCI_PRIO_MAX - 1 so lower-priority traffic
 * is eventually transmitted.  Channels that did transmit merely have
 * their per-round counter reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got airtime: reset and skip it. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling. */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen; stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2451
b71d385a
AE
2452static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2453{
2454 /* Calculate count of blocks used by this packet */
2455 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2456}
2457
6039aa73 2458static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2459{
1da177e4
LT
2460 if (!test_bit(HCI_RAW, &hdev->flags)) {
2461 /* ACL tx timeout must be longer than maximum
2462 * link supervision timeout (40.9 seconds) */
63d2bc1b 2463 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2464 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2465 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2466 }
63d2bc1b 2467}
1da177e4 2468
/* Packet-based ACL scheduler: while the controller has free ACL buffer
 * slots, repeatedly pick the best channel (hci_chan_sent) and send up
 * to its quota of packets from the head of its queue, stopping early
 * when a lower-priority packet is reached.  Ends with an
 * anti-starvation recalculation round if anything was sent. */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Disconnect stalled links if TX has been blocked too long. */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer slot consumed. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance priorities for starved channels. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2506
6039aa73 2507static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2508{
63d2bc1b 2509 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2510 struct hci_chan *chan;
2511 struct sk_buff *skb;
2512 int quote;
b71d385a 2513
63d2bc1b 2514 __check_timeout(hdev, cnt);
b71d385a
AE
2515
2516 while (hdev->block_cnt > 0 &&
a8c5fb1a 2517 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
b71d385a
AE
2518 u32 priority = (skb_peek(&chan->data_q))->priority;
2519 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2520 int blocks;
2521
2522 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2523 skb->len, skb->priority);
b71d385a
AE
2524
2525 /* Stop if priority has changed */
2526 if (skb->priority < priority)
2527 break;
2528
2529 skb = skb_dequeue(&chan->data_q);
2530
2531 blocks = __get_blocks(hdev, skb);
2532 if (blocks > hdev->block_cnt)
2533 return;
2534
2535 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2536 bt_cb(skb)->force_active);
b71d385a
AE
2537
2538 hci_send_frame(skb);
2539 hdev->acl_last_tx = jiffies;
2540
2541 hdev->block_cnt -= blocks;
2542 quote -= blocks;
2543
2544 chan->sent += blocks;
2545 chan->conn->sent += blocks;
2546 }
2547 }
2548
2549 if (cnt != hdev->block_cnt)
2550 hci_prio_recalculate(hdev, ACL_LINK);
2551}
2552
6039aa73 2553static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2554{
2555 BT_DBG("%s", hdev->name);
2556
2557 if (!hci_conn_num(hdev, ACL_LINK))
2558 return;
2559
2560 switch (hdev->flow_ctl_mode) {
2561 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2562 hci_sched_acl_pkt(hdev);
2563 break;
2564
2565 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2566 hci_sched_acl_blk(hdev);
2567 break;
2568 }
2569}
2570
1da177e4 2571/* Schedule SCO */
6039aa73 2572static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
2573{
2574 struct hci_conn *conn;
2575 struct sk_buff *skb;
2576 int quote;
2577
2578 BT_DBG("%s", hdev->name);
2579
52087a79
LAD
2580 if (!hci_conn_num(hdev, SCO_LINK))
2581 return;
2582
1da177e4
LT
2583 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2584 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2585 BT_DBG("skb %p len %d", skb, skb->len);
2586 hci_send_frame(skb);
2587
2588 conn->sent++;
2589 if (conn->sent == ~0)
2590 conn->sent = 0;
2591 }
2592 }
2593}
2594
6039aa73 2595static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2596{
2597 struct hci_conn *conn;
2598 struct sk_buff *skb;
2599 int quote;
2600
2601 BT_DBG("%s", hdev->name);
2602
52087a79
LAD
2603 if (!hci_conn_num(hdev, ESCO_LINK))
2604 return;
2605
8fc9ced3
GP
2606 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2607 &quote))) {
b6a0dc82
MH
2608 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2609 BT_DBG("skb %p len %d", skb, skb->len);
2610 hci_send_frame(skb);
2611
2612 conn->sent++;
2613 if (conn->sent == ~0)
2614 conn->sent = 0;
2615 }
2616 }
2617}
2618
/* LE scheduler: same shape as the packet-based ACL scheduler, but
 * accounting against the dedicated LE buffer pool — or against the
 * shared ACL pool when the controller reports no LE buffers
 * (le_pkts == 0). */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the budget pool: dedicated LE credits if present, else
	 * the shared ACL credits. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance priorities for starved channels. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2669
3eff45ea 2670static void hci_tx_work(struct work_struct *work)
1da177e4 2671{
3eff45ea 2672 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2673 struct sk_buff *skb;
2674
6ed58ec5 2675 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 2676 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2677
2678 /* Schedule queues and send stuff to HCI driver */
2679
2680 hci_sched_acl(hdev);
2681
2682 hci_sched_sco(hdev);
2683
b6a0dc82
MH
2684 hci_sched_esco(hdev);
2685
6ed58ec5
VT
2686 hci_sched_le(hdev);
2687
1da177e4
LT
2688 /* Send next queued raw (unknown type) packet */
2689 while ((skb = skb_dequeue(&hdev->raw_q)))
2690 hci_send_frame(skb);
1da177e4
LT
2691}
2692
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
/* Deliver an inbound ACL data packet to L2CAP, or drop it when no
 * connection matches the handle from the ACL header.  Consumes @skb. */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs the connection handle plus the packet
	 * boundary/broadcast flags; split them apart. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Notify mgmt of the connection exactly once: the
		 * test_and_set_bit makes the event one-shot. */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2738
2739/* SCO data packet */
6039aa73 2740static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2741{
2742 struct hci_sco_hdr *hdr = (void *) skb->data;
2743 struct hci_conn *conn;
2744 __u16 handle;
2745
2746 skb_pull(skb, HCI_SCO_HDR_SIZE);
2747
2748 handle = __le16_to_cpu(hdr->handle);
2749
f0e09510 2750 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
2751
2752 hdev->stat.sco_rx++;
2753
2754 hci_dev_lock(hdev);
2755 conn = hci_conn_hash_lookup_handle(hdev, handle);
2756 hci_dev_unlock(hdev);
2757
2758 if (conn) {
1da177e4 2759 /* Send to upper protocol */
686ebf28
UF
2760 sco_recv_scodata(conn, skb);
2761 return;
1da177e4 2762 } else {
8e87d142 2763 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2764 hdev->name, handle);
1da177e4
LT
2765 }
2766
2767 kfree_skb(skb);
2768}
2769
/* Work item that processes the inbound HCI queue: copies each frame to
 * the monitor and (in promiscuous mode) raw sockets, then dispatches
 * it by packet type.  Frames are consumed one way or another. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device; the kernel stack
		 * processes nothing. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it. */
			kfree_skb(skb);
			break;
		}
	}
}
2824
/* Work item that sends the next queued HCI command when the controller
 * has command credit (cmd_cnt > 0).  A clone of the outgoing command
 * is kept in hdev->sent_cmd for the completion handler, and the
 * command timeout timer is (re)armed. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Free the clone of the previous command before storing
		 * a new one (kfree_skb tolerates NULL). */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During HCI Reset the timer is stopped instead of
			 * re-armed. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry
			 * from the worker later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2856
2857int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2858{
2859 /* General inquiry access code (GIAC) */
2860 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2861 struct hci_cp_inquiry cp;
2862
2863 BT_DBG("%s", hdev->name);
2864
2865 if (test_bit(HCI_INQUIRY, &hdev->flags))
2866 return -EINPROGRESS;
2867
4663262c
JH
2868 inquiry_cache_flush(hdev);
2869
2519a1fc
AG
2870 memset(&cp, 0, sizeof(cp));
2871 memcpy(&cp.lap, lap, sizeof(cp.lap));
2872 cp.length = length;
2873
2874 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2875}
023d5049
AG
2876
2877int hci_cancel_inquiry(struct hci_dev *hdev)
2878{
2879 BT_DBG("%s", hdev->name);
2880
2881 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2882 return -EALREADY;
023d5049
AG
2883
2884 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2885}
31f7956c
AG
2886
2887u8 bdaddr_to_le(u8 bdaddr_type)
2888{
2889 switch (bdaddr_type) {
2890 case BDADDR_LE_PUBLIC:
2891 return ADDR_LE_DEV_PUBLIC;
2892
2893 default:
2894 /* Fallback to LE Random address type */
2895 return ADDR_LE_DEV_RANDOM;
2896 }
2897}