/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device-level event (e.g. HCI_DEV_UP / HCI_DEV_DOWN) to the
 * HCI socket layer so monitoring sockets observe device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Complete a pending synchronous HCI request.
 *
 * @cmd:    opcode of the command whose completion event arrived
 * @result: HCI status code reported by the controller
 *
 * During the HCI_INIT phase, completions for commands other than the
 * last-issued init command are normally ignored, except for the CSR
 * spontaneous-reset quirk handled below.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a clone of the last sent command so the cmd
		 * worker retransmits it. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Wake up the waiter in __hci_request(), handing over the result. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
/* Execute request and wait for completion.
 *
 * @req:     callback that queues the actual HCI command(s)
 * @opt:     opaque argument passed through to @req
 * @timeout: wait limit in jiffies
 *
 * Returns 0 on success, a negative errno translated from the HCI status,
 * -EINTR if a signal interrupted the wait, or -ETIMEDOUT. Caller must
 * hold the request lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue BEFORE issuing the request so
	 * the completion wake-up cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied: map the HCI status to an errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
152
6039aa73
GP
153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
1da177e4
LT
156{
157 int ret;
158
7c6a329e
MH
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
1da177e4
LT
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
/* Request callback: issue an HCI_Reset and flag it as in progress so the
 * event path knows a reset was initiated locally.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
178
/* Queue the BR/EDR controller init command sequence. Runs inside the
 * HCI_INIT request; each command's completion drives hci_req_complete().
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all link keys stored on the controller itself. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
224
e61ef499
AE
/* Queue the init command sequence for an AMP (alternate MAC/PHY)
 * controller; AMPs use block-based flow control and report data-block
 * sizes instead of ACL buffer sizes.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
238
/* Top-level HCI_INIT request callback: flush driver-supplied init
 * commands, optionally reset, then run the type-specific init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: move any driver-queued init packets onto the
	 * command queue ahead of the standard sequence. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
275
6ed58ec5
VT
/* LE-specific init request: query the LE ACL buffer parameters. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
283
1da177e4
LT
284static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
285{
286 __u8 scan = opt;
287
288 BT_DBG("%s %x", hdev->name, scan);
289
290 /* Inquiry and Page scans */
a9de9248 291 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
292}
293
294static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
295{
296 __u8 auth = opt;
297
298 BT_DBG("%s %x", hdev->name, auth);
299
300 /* Authentication */
a9de9248 301 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
302}
303
304static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
305{
306 __u8 encrypt = opt;
307
308 BT_DBG("%s %x", hdev->name, encrypt);
309
e4e8e37c 310 /* Encryption */
a9de9248 311 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
312}
313
e4e8e37c
MH
314static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
315{
316 __le16 policy = cpu_to_le16(opt);
317
a418b893 318 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
319
320 /* Default link policy */
321 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
322}
323
8e87d142 324/* Get HCI device by index.
1da177e4
LT
325 * Device is held on return. */
326struct hci_dev *hci_dev_get(int index)
327{
8035ded4 328 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
329
330 BT_DBG("%d", index);
331
332 if (index < 0)
333 return NULL;
334
335 read_lock(&hci_dev_list_lock);
8035ded4 336 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
337 if (d->id == index) {
338 hdev = hci_dev_hold(d);
339 break;
340 }
341 }
342 read_unlock(&hci_dev_list_lock);
343 return hdev;
344}
1da177e4
LT
345
346/* ---- Inquiry support ---- */
ff9ef578 347
30dc78e1
JH
348bool hci_discovery_active(struct hci_dev *hdev)
349{
350 struct discovery_state *discov = &hdev->discovery;
351
6fbe195d 352 switch (discov->state) {
343f935b 353 case DISCOVERY_FINDING:
6fbe195d 354 case DISCOVERY_RESOLVING:
30dc78e1
JH
355 return true;
356
6fbe195d
AG
357 default:
358 return false;
359 }
30dc78e1
JH
360}
361
ff9ef578
JH
362void hci_discovery_set_state(struct hci_dev *hdev, int state)
363{
364 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
365
366 if (hdev->discovery.state == state)
367 return;
368
369 switch (state) {
370 case DISCOVERY_STOPPED:
7b99b659
AG
371 if (hdev->discovery.state != DISCOVERY_STARTING)
372 mgmt_discovering(hdev, 0);
ff9ef578
JH
373 break;
374 case DISCOVERY_STARTING:
375 break;
343f935b 376 case DISCOVERY_FINDING:
ff9ef578
JH
377 mgmt_discovering(hdev, 1);
378 break;
30dc78e1
JH
379 case DISCOVERY_RESOLVING:
380 break;
ff9ef578
JH
381 case DISCOVERY_STOPPING:
382 break;
383 }
384
385 hdev->discovery.state = state;
386}
387
1da177e4
LT
388static void inquiry_cache_flush(struct hci_dev *hdev)
389{
30883512 390 struct discovery_state *cache = &hdev->discovery;
b57c1a56 391 struct inquiry_entry *p, *n;
1da177e4 392
561aafbc
JH
393 list_for_each_entry_safe(p, n, &cache->all, all) {
394 list_del(&p->all);
b57c1a56 395 kfree(p);
1da177e4 396 }
561aafbc
JH
397
398 INIT_LIST_HEAD(&cache->unknown);
399 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
400}
401
a8c5fb1a
GP
402struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
403 bdaddr_t *bdaddr)
1da177e4 404{
30883512 405 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
406 struct inquiry_entry *e;
407
408 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
409
561aafbc
JH
410 list_for_each_entry(e, &cache->all, all) {
411 if (!bacmp(&e->data.bdaddr, bdaddr))
412 return e;
413 }
414
415 return NULL;
416}
417
418struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 419 bdaddr_t *bdaddr)
561aafbc 420{
30883512 421 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
422 struct inquiry_entry *e;
423
424 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
425
426 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 427 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
428 return e;
429 }
430
431 return NULL;
1da177e4
LT
432}
433
30dc78e1 434struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
435 bdaddr_t *bdaddr,
436 int state)
30dc78e1
JH
437{
438 struct discovery_state *cache = &hdev->discovery;
439 struct inquiry_entry *e;
440
441 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
442
443 list_for_each_entry(e, &cache->resolve, list) {
444 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
445 return e;
446 if (!bacmp(&e->data.bdaddr, bdaddr))
447 return e;
448 }
449
450 return NULL;
451}
452
a3d4e20a 453void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 454 struct inquiry_entry *ie)
a3d4e20a
JH
455{
456 struct discovery_state *cache = &hdev->discovery;
457 struct list_head *pos = &cache->resolve;
458 struct inquiry_entry *p;
459
460 list_del(&ie->list);
461
462 list_for_each_entry(p, &cache->resolve, list) {
463 if (p->name_state != NAME_PENDING &&
a8c5fb1a 464 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
465 break;
466 pos = &p->list;
467 }
468
469 list_add(&ie->list, pos);
470}
471
/* Insert or refresh an inquiry result in the cache.
 *
 * @data:       inquiry data from the controller event
 * @name_known: whether the remote name is already known
 * @ssp:        out-param set to the entry's SSP capability (may be NULL)
 *
 * Returns true when the entry's name is known (no name resolution
 * needed), false when the name is still unknown or allocation failed.
 * Caller must hold the device lock.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* A previously seen SSP-capable device stays SSP-capable. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list RSSI-ordered when the signal
		 * strength of a name-needed entry changes. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN (and drop off the unknown list) unless a
	 * resolution is already pending. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
527
528static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
529{
30883512 530 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
531 struct inquiry_info *info = (struct inquiry_info *) buf;
532 struct inquiry_entry *e;
533 int copied = 0;
534
561aafbc 535 list_for_each_entry(e, &cache->all, all) {
1da177e4 536 struct inquiry_data *data = &e->data;
b57c1a56
JH
537
538 if (copied >= num)
539 break;
540
1da177e4
LT
541 bacpy(&info->bdaddr, &data->bdaddr);
542 info->pscan_rep_mode = data->pscan_rep_mode;
543 info->pscan_period_mode = data->pscan_period_mode;
544 info->pscan_mode = data->pscan_mode;
545 memcpy(info->dev_class, data->dev_class, 3);
546 info->clock_offset = data->clock_offset;
b57c1a56 547
1da177e4 548 info++;
b57c1a56 549 copied++;
1da177e4
LT
550 }
551
552 BT_DBG("cache %p, copied %d", cache, copied);
553 return copied;
554}
555
556static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
557{
558 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
559 struct hci_cp_inquiry cp;
560
561 BT_DBG("%s", hdev->name);
562
563 if (test_bit(HCI_INQUIRY, &hdev->flags))
564 return;
565
566 /* Start Inquiry */
567 memcpy(&cp.lap, &ir->lap, 3);
568 cp.length = ir->length;
569 cp.num_rsp = ir->num_rsp;
a9de9248 570 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
571}
572
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry, then copy
 * the (possibly cached) results back to userspace.
 *
 * @arg: userspace pointer to struct hci_inquiry_req followed by space
 *       for the result array.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Reuse the cache unless it is stale, empty, or the caller asked
	 * for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; 2000ms per unit is a safe upper
	 * bound for the synchronous wait. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
639
640/* ---- HCI ioctl helpers ---- */
641
/* Bring an HCI device up: open the transport, run the init sequence
 * (unless the device is raw), and announce HCI_DEV_UP / mgmt power-on.
 *
 * @dev: device index.
 * Returns 0 on success or a negative errno; on init failure the
 * transport is fully torn down again.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* rfkill vetoes powering on. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* mgmt power-on events are deferred while still in SETUP. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup: quiesce workers, drop queued
		 * packets, and close the transport again. */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
733
/* Take an HCI device down: cancel pending work, flush connections and
 * queues, optionally reset the controller, and close the transport.
 * Safe to call on an already-down device (returns 0).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A pending discoverable timeout is void once powered down. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Only announce mgmt power-off if this was not an auto-off. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
821
822int hci_dev_close(__u16 dev)
823{
824 struct hci_dev *hdev;
825 int err;
826
70f23020
AE
827 hdev = hci_dev_get(dev);
828 if (!hdev)
1da177e4 829 return -ENODEV;
8ee56540
MH
830
831 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
832 cancel_delayed_work(&hdev->power_off);
833
1da177e4 834 err = hci_dev_do_close(hdev);
8ee56540 835
1da177e4
LT
836 hci_dev_put(hdev);
837 return err;
838}
839
/* HCIDEVRESET ioctl backend: drop all queued traffic and connections,
 * reset counters, and (for non-raw devices) issue an HCI_Reset.
 * A device that is not up is left untouched (returns 0).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart the command credit and zero per-type packet counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
877
878int hci_dev_reset_stat(__u16 dev)
879{
880 struct hci_dev *hdev;
881 int ret = 0;
882
70f23020
AE
883 hdev = hci_dev_get(dev);
884 if (!hdev)
1da177e4
LT
885 return -ENODEV;
886
887 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
888
889 hci_dev_put(hdev);
890
891 return ret;
892}
893
/* Dispatcher for the HCISET* device-configuration ioctls.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETSCAN, ...)
 * @arg: userspace pointer to struct hci_dev_req
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs mtu in the high 16 bits, pkts in the low. */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
968
/* HCIGETDEVLIST ioctl backend: copy id/flags for up to the requested
 * number of registered devices to userspace.
 *
 * @arg: userspace struct hci_dev_list_req (dev_num followed by array).
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation; reject zero or oversized requests. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy (non-mgmt) access keeps the device powered. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1015
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for one
 * device and copy it to userspace.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy (non-mgmt) access keeps the device powered. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type. */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1057
1058/* ---- Interface to HCI drivers ---- */
1059
611b30f7
MH
1060static int hci_rfkill_set_block(void *data, bool blocked)
1061{
1062 struct hci_dev *hdev = data;
1063
1064 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1065
1066 if (!blocked)
1067 return 0;
1068
1069 hci_dev_do_close(hdev);
1070
1071 return 0;
1072}
1073
/* rfkill operations: only block-state changes are of interest. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1077
ab81cbf9
JH
/* Deferred power-on worker: open the device, arm the auto-off timer if
 * applicable, and announce the controller to mgmt after initial setup.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices are switched back off unless claimed. */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1093
1094static void hci_power_off(struct work_struct *work)
1095{
3243553f 1096 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1097 power_off.work);
ab81cbf9
JH
1098
1099 BT_DBG("%s", hdev->name);
1100
8ee56540 1101 hci_dev_do_close(hdev);
ab81cbf9
JH
1102}
1103
16ab91ab
JH
/* Discoverable-timeout worker: fall back to page-scan only, ending the
 * discoverable window.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1121
2aeb9a1a
JH
1122int hci_uuids_clear(struct hci_dev *hdev)
1123{
1124 struct list_head *p, *n;
1125
1126 list_for_each_safe(p, n, &hdev->uuids) {
1127 struct bt_uuid *uuid;
1128
1129 uuid = list_entry(p, struct bt_uuid, list);
1130
1131 list_del(p);
1132 kfree(uuid);
1133 }
1134
1135 return 0;
1136}
1137
55ed8ca1
JH
1138int hci_link_keys_clear(struct hci_dev *hdev)
1139{
1140 struct list_head *p, *n;
1141
1142 list_for_each_safe(p, n, &hdev->link_keys) {
1143 struct link_key *key;
1144
1145 key = list_entry(p, struct link_key, list);
1146
1147 list_del(p);
1148 kfree(key);
1149 }
1150
1151 return 0;
1152}
1153
b899efaf
VCG
1154int hci_smp_ltks_clear(struct hci_dev *hdev)
1155{
1156 struct smp_ltk *k, *tmp;
1157
1158 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1159 list_del(&k->list);
1160 kfree(k);
1161 }
1162
1163 return 0;
1164}
1165
55ed8ca1
JH
1166struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1167{
8035ded4 1168 struct link_key *k;
55ed8ca1 1169
8035ded4 1170 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1171 if (bacmp(bdaddr, &k->bdaddr) == 0)
1172 return k;
55ed8ca1
JH
1173
1174 return NULL;
1175}
1176
745c0ce3 1177static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1178 u8 key_type, u8 old_key_type)
d25e28ab
JH
1179{
1180 /* Legacy key */
1181 if (key_type < 0x03)
745c0ce3 1182 return true;
d25e28ab
JH
1183
1184 /* Debug keys are insecure so don't store them persistently */
1185 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1186 return false;
d25e28ab
JH
1187
1188 /* Changed combination key and there's no previous one */
1189 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1190 return false;
d25e28ab
JH
1191
1192 /* Security mode 3 case */
1193 if (!conn)
745c0ce3 1194 return true;
d25e28ab
JH
1195
1196 /* Neither local nor remote side had no-bonding as requirement */
1197 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1198 return true;
d25e28ab
JH
1199
1200 /* Local side had dedicated bonding as requirement */
1201 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1202 return true;
d25e28ab
JH
1203
1204 /* Remote side had dedicated bonding as requirement */
1205 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1206 return true;
d25e28ab
JH
1207
1208 /* If none of the above criteria match, then don't store the key
1209 * persistently */
745c0ce3 1210 return false;
d25e28ab
JH
1211}
1212
c9839a11 1213struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1214{
c9839a11 1215 struct smp_ltk *k;
75d262c2 1216
c9839a11
VCG
1217 list_for_each_entry(k, &hdev->long_term_keys, list) {
1218 if (k->ediv != ediv ||
a8c5fb1a 1219 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1220 continue;
1221
c9839a11 1222 return k;
75d262c2
VCG
1223 }
1224
1225 return NULL;
1226}
75d262c2 1227
c9839a11 1228struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1229 u8 addr_type)
75d262c2 1230{
c9839a11 1231 struct smp_ltk *k;
75d262c2 1232
c9839a11
VCG
1233 list_for_each_entry(k, &hdev->long_term_keys, list)
1234 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1235 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1236 return k;
1237
1238 return NULL;
1239}
75d262c2 1240
d25e28ab 1241int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1242 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1243{
1244 struct link_key *key, *old_key;
745c0ce3
VA
1245 u8 old_key_type;
1246 bool persistent;
55ed8ca1
JH
1247
1248 old_key = hci_find_link_key(hdev, bdaddr);
1249 if (old_key) {
1250 old_key_type = old_key->type;
1251 key = old_key;
1252 } else {
12adcf3a 1253 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1254 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1255 if (!key)
1256 return -ENOMEM;
1257 list_add(&key->list, &hdev->link_keys);
1258 }
1259
1260 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1261
d25e28ab
JH
1262 /* Some buggy controller combinations generate a changed
1263 * combination key for legacy pairing even when there's no
1264 * previous key */
1265 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1266 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1267 type = HCI_LK_COMBINATION;
655fe6ec
JH
1268 if (conn)
1269 conn->key_type = type;
1270 }
d25e28ab 1271
55ed8ca1 1272 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1273 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1274 key->pin_len = pin_len;
1275
b6020ba0 1276 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1277 key->type = old_key_type;
4748fed2
JH
1278 else
1279 key->type = type;
1280
4df378a1
JH
1281 if (!new_key)
1282 return 0;
1283
1284 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1285
744cf19e 1286 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1287
6ec5bcad
VA
1288 if (conn)
1289 conn->flush_key = !persistent;
55ed8ca1
JH
1290
1291 return 0;
1292}
1293
c9839a11 1294int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1295 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1296 ediv, u8 rand[8])
75d262c2 1297{
c9839a11 1298 struct smp_ltk *key, *old_key;
75d262c2 1299
c9839a11
VCG
1300 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1301 return 0;
75d262c2 1302
c9839a11
VCG
1303 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1304 if (old_key)
75d262c2 1305 key = old_key;
c9839a11
VCG
1306 else {
1307 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1308 if (!key)
1309 return -ENOMEM;
c9839a11 1310 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1311 }
1312
75d262c2 1313 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1314 key->bdaddr_type = addr_type;
1315 memcpy(key->val, tk, sizeof(key->val));
1316 key->authenticated = authenticated;
1317 key->ediv = ediv;
1318 key->enc_size = enc_size;
1319 key->type = type;
1320 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1321
c9839a11
VCG
1322 if (!new_key)
1323 return 0;
75d262c2 1324
261cc5aa
VCG
1325 if (type & HCI_SMP_LTK)
1326 mgmt_new_ltk(hdev, key, 1);
1327
75d262c2
VCG
1328 return 0;
1329}
1330
55ed8ca1
JH
1331int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1332{
1333 struct link_key *key;
1334
1335 key = hci_find_link_key(hdev, bdaddr);
1336 if (!key)
1337 return -ENOENT;
1338
1339 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1340
1341 list_del(&key->list);
1342 kfree(key);
1343
1344 return 0;
1345}
1346
b899efaf
VCG
1347int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1348{
1349 struct smp_ltk *k, *tmp;
1350
1351 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1352 if (bacmp(bdaddr, &k->bdaddr))
1353 continue;
1354
1355 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1356
1357 list_del(&k->list);
1358 kfree(k);
1359 }
1360
1361 return 0;
1362}
1363
6bd32326 1364/* HCI command timer function */
bda4f23a 1365static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1366{
1367 struct hci_dev *hdev = (void *) arg;
1368
bda4f23a
AE
1369 if (hdev->sent_cmd) {
1370 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1371 u16 opcode = __le16_to_cpu(sent->opcode);
1372
1373 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1374 } else {
1375 BT_ERR("%s command tx timeout", hdev->name);
1376 }
1377
6bd32326 1378 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1379 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1380}
1381
2763eda6 1382struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1383 bdaddr_t *bdaddr)
2763eda6
SJ
1384{
1385 struct oob_data *data;
1386
1387 list_for_each_entry(data, &hdev->remote_oob_data, list)
1388 if (bacmp(bdaddr, &data->bdaddr) == 0)
1389 return data;
1390
1391 return NULL;
1392}
1393
1394int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1395{
1396 struct oob_data *data;
1397
1398 data = hci_find_remote_oob_data(hdev, bdaddr);
1399 if (!data)
1400 return -ENOENT;
1401
1402 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1403
1404 list_del(&data->list);
1405 kfree(data);
1406
1407 return 0;
1408}
1409
1410int hci_remote_oob_data_clear(struct hci_dev *hdev)
1411{
1412 struct oob_data *data, *n;
1413
1414 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1415 list_del(&data->list);
1416 kfree(data);
1417 }
1418
1419 return 0;
1420}
1421
/* Store (or refresh) the OOB hash/randomizer pair for @bdaddr.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	/* No entry yet for this address: allocate and link one */
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1445
04124681 1446struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1447{
8035ded4 1448 struct bdaddr_list *b;
b2a66aad 1449
8035ded4 1450 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1451 if (bacmp(bdaddr, &b->bdaddr) == 0)
1452 return b;
b2a66aad
AJ
1453
1454 return NULL;
1455}
1456
1457int hci_blacklist_clear(struct hci_dev *hdev)
1458{
1459 struct list_head *p, *n;
1460
1461 list_for_each_safe(p, n, &hdev->blacklist) {
1462 struct bdaddr_list *b;
1463
1464 b = list_entry(p, struct bdaddr_list, list);
1465
1466 list_del(p);
1467 kfree(b);
1468 }
1469
1470 return 0;
1471}
1472
88c1fe4b 1473int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1474{
1475 struct bdaddr_list *entry;
b2a66aad
AJ
1476
1477 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1478 return -EBADF;
1479
5e762444
AJ
1480 if (hci_blacklist_lookup(hdev, bdaddr))
1481 return -EEXIST;
b2a66aad
AJ
1482
1483 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1484 if (!entry)
1485 return -ENOMEM;
b2a66aad
AJ
1486
1487 bacpy(&entry->bdaddr, bdaddr);
1488
1489 list_add(&entry->list, &hdev->blacklist);
1490
88c1fe4b 1491 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1492}
1493
88c1fe4b 1494int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1495{
1496 struct bdaddr_list *entry;
b2a66aad 1497
1ec918ce 1498 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1499 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1500
1501 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1502 if (!entry)
5e762444 1503 return -ENOENT;
b2a66aad
AJ
1504
1505 list_del(&entry->list);
1506 kfree(entry);
1507
88c1fe4b 1508 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1509}
1510
7ba8b4be
AG
/* __hci_request callback: send LE Set Scan Parameters.
 * @opt carries a pointer to the le_scan_params to program.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1523
/* __hci_request callback: enable LE scanning with duplicate filtering
 * turned on. @opt is unused.
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1534
/* Synchronously start an LE scan: program parameters, enable scanning,
 * then arm the delayed work that stops it after @timeout ms.
 *
 * Runs under the request lock; each HCI request may block up to 3s.
 * Returns -EINPROGRESS if a scan is already active, a negative request
 * error, or 0 on success.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	/* Only enable scanning if the parameters were accepted */
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1568
7dbfac1d
AG
/* Cancel a running LE scan before its timeout fires.
 *
 * Returns -EALREADY when no scan is active. The disable command is sent
 * only if we managed to cancel the pending disable work, so the scan is
 * never disabled twice.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1586
7ba8b4be
AG
/* Delayed-work handler that stops an LE scan when its timeout expires.
 * An all-zero command parameter block means "scanning disabled".
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1599
28b75a89
AG
/* Work handler that performs the scan queued by hci_le_scan(), using
 * the parameters stashed in hdev->le_scan_params.
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1610
/* Asynchronously request an LE scan.
 *
 * Stores the parameters on the device and queues le_scan_work on the
 * system_long_wq (the scan itself blocks on HCI requests). Returns
 * -EINPROGRESS if scan work is already queued or running, else 0.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1630
9be0dab7
DH
/* Alloc HCI device */
/* Allocate and initialise a new hci_dev: default packet types, locks,
 * key/blacklist/OOB lists, work items, skb queues and the command
 * timer. The caller registers it with hci_register_dev() and releases
 * it with hci_free_dev(). Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* Sniff intervals in baseband slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for unanswered HCI commands */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1684
/* Free HCI device */
/* Drop the reference taken at allocation; the hci_dev itself is freed
 * by the embedded device's release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1694
1da177e4
LT
/* Register HCI device */
/* Make an allocated hci_dev visible to the stack: assign an index,
 * add it to the global list, create its workqueue and sysfs entries,
 * hook up rfkill (best effort) and kick the power-on work.
 *
 * Returns the assigned index (>= 0) on success or a negative errno;
 * on failure all partially-created resources are unwound.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must supply at least open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high priority: per-device ordered processing */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional; registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1773
/* Unregister HCI device */
/* Tear down a registered hci_dev in the reverse order of registration:
 * unlink it, close it, flush reassembly buffers, notify mgmt (unless
 * still in setup), drop rfkill/sysfs/workqueue, clear all stored keys
 * and lists, release the registration reference and free the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stop anyone else from starting new work on this device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1829
/* Suspend HCI device */
/* Notify listeners of suspend; no state is changed here. Returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1837
/* Resume HCI device */
/* Notify listeners of resume; no state is changed here. Returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1845
76bca880
MH
/* Receive frame from HCI drivers */
/* Entry point for complete frames coming up from a driver. Takes
 * ownership of @skb (frees it on rejection), stamps it and queues it
 * for the RX work. Returns 0 on acceptance or -ENXIO if the device is
 * missing or neither up nor initialising.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incomming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1868
33e882a5 1869static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 1870 int count, __u8 index)
33e882a5
SS
1871{
1872 int len = 0;
1873 int hlen = 0;
1874 int remain = count;
1875 struct sk_buff *skb;
1876 struct bt_skb_cb *scb;
1877
1878 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 1879 index >= NUM_REASSEMBLY)
33e882a5
SS
1880 return -EILSEQ;
1881
1882 skb = hdev->reassembly[index];
1883
1884 if (!skb) {
1885 switch (type) {
1886 case HCI_ACLDATA_PKT:
1887 len = HCI_MAX_FRAME_SIZE;
1888 hlen = HCI_ACL_HDR_SIZE;
1889 break;
1890 case HCI_EVENT_PKT:
1891 len = HCI_MAX_EVENT_SIZE;
1892 hlen = HCI_EVENT_HDR_SIZE;
1893 break;
1894 case HCI_SCODATA_PKT:
1895 len = HCI_MAX_SCO_SIZE;
1896 hlen = HCI_SCO_HDR_SIZE;
1897 break;
1898 }
1899
1e429f38 1900 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1901 if (!skb)
1902 return -ENOMEM;
1903
1904 scb = (void *) skb->cb;
1905 scb->expect = hlen;
1906 scb->pkt_type = type;
1907
1908 skb->dev = (void *) hdev;
1909 hdev->reassembly[index] = skb;
1910 }
1911
1912 while (count) {
1913 scb = (void *) skb->cb;
89bb46d0 1914 len = min_t(uint, scb->expect, count);
33e882a5
SS
1915
1916 memcpy(skb_put(skb, len), data, len);
1917
1918 count -= len;
1919 data += len;
1920 scb->expect -= len;
1921 remain = count;
1922
1923 switch (type) {
1924 case HCI_EVENT_PKT:
1925 if (skb->len == HCI_EVENT_HDR_SIZE) {
1926 struct hci_event_hdr *h = hci_event_hdr(skb);
1927 scb->expect = h->plen;
1928
1929 if (skb_tailroom(skb) < scb->expect) {
1930 kfree_skb(skb);
1931 hdev->reassembly[index] = NULL;
1932 return -ENOMEM;
1933 }
1934 }
1935 break;
1936
1937 case HCI_ACLDATA_PKT:
1938 if (skb->len == HCI_ACL_HDR_SIZE) {
1939 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1940 scb->expect = __le16_to_cpu(h->dlen);
1941
1942 if (skb_tailroom(skb) < scb->expect) {
1943 kfree_skb(skb);
1944 hdev->reassembly[index] = NULL;
1945 return -ENOMEM;
1946 }
1947 }
1948 break;
1949
1950 case HCI_SCODATA_PKT:
1951 if (skb->len == HCI_SCO_HDR_SIZE) {
1952 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1953 scb->expect = h->dlen;
1954
1955 if (skb_tailroom(skb) < scb->expect) {
1956 kfree_skb(skb);
1957 hdev->reassembly[index] = NULL;
1958 return -ENOMEM;
1959 }
1960 }
1961 break;
1962 }
1963
1964 if (scb->expect == 0) {
1965 /* Complete frame */
1966
1967 bt_cb(skb)->pkt_type = type;
1968 hci_recv_frame(skb);
1969
1970 hdev->reassembly[index] = NULL;
1971 return remain;
1972 }
1973 }
1974
1975 return remain;
1976}
1977
ef222013
MH
/* Feed driver data of a known packet @type into the per-type
 * reassembly slot, looping until all input is consumed.
 * Returns the final hci_reassembly() result: remaining bytes (>= 0)
 * or a negative error.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		/* Slot "type - 1" is dedicated to this packet type */
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1997
99811510
SS
/* Slot reserved for byte-stream (UART-style) reassembly */
#define STREAM_REASSEMBLY 0

/* Feed raw driver byte-stream data (packet type indicator inline as
 * the first byte of each frame) into the stream reassembly slot.
 * Returns remaining bytes (>= 0) or a negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2032
1da177e4
LT
2033/* ---- Interface to upper protocols ---- */
2034
1da177e4
LT
2035int hci_register_cb(struct hci_cb *cb)
2036{
2037 BT_DBG("%p name %s", cb, cb->name);
2038
f20d09d5 2039 write_lock(&hci_cb_list_lock);
1da177e4 2040 list_add(&cb->list, &hci_cb_list);
f20d09d5 2041 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2042
2043 return 0;
2044}
2045EXPORT_SYMBOL(hci_register_cb);
2046
2047int hci_unregister_cb(struct hci_cb *cb)
2048{
2049 BT_DBG("%p name %s", cb, cb->name);
2050
f20d09d5 2051 write_lock(&hci_cb_list_lock);
1da177e4 2052 list_del(&cb->list);
f20d09d5 2053 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2054
2055 return 0;
2056}
2057EXPORT_SYMBOL(hci_unregister_cb);
2058
/* Hand a fully built frame to the driver.
 *
 * Copies go to the monitor interface and (in promiscuous mode) to raw
 * sockets before the skb is orphaned and passed to hdev->send().
 * Returns the driver's result, or -ENODEV when skb->dev is unset.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2086
/* Send HCI command */
/* Build a command packet for @opcode with @plen bytes of @param and
 * queue it on the command queue; the command work sends it when a
 * credit is available. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2122
/* Get data from the previously sent command */
/* Return a pointer to the parameter block of the last sent command,
 * or NULL when nothing was sent or the opcode does not match.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2140
/* Send ACL data */
/* Prepend an ACL header (handle + packet-boundary flags, data length)
 * to @skb in place.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2153
73d80deb 2154static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
a8c5fb1a 2155 struct sk_buff *skb, __u16 flags)
1da177e4
LT
2156{
2157 struct hci_dev *hdev = conn->hdev;
2158 struct sk_buff *list;
2159
087bfd99
GP
2160 skb->len = skb_headlen(skb);
2161 skb->data_len = 0;
2162
2163 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2164 hci_add_acl_hdr(skb, conn->handle, flags);
2165
70f23020
AE
2166 list = skb_shinfo(skb)->frag_list;
2167 if (!list) {
1da177e4
LT
2168 /* Non fragmented */
2169 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2170
73d80deb 2171 skb_queue_tail(queue, skb);
1da177e4
LT
2172 } else {
2173 /* Fragmented */
2174 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2175
2176 skb_shinfo(skb)->frag_list = NULL;
2177
2178 /* Queue all fragments atomically */
af3e6359 2179 spin_lock(&queue->lock);
1da177e4 2180
73d80deb 2181 __skb_queue_tail(queue, skb);
e702112f
AE
2182
2183 flags &= ~ACL_START;
2184 flags |= ACL_CONT;
1da177e4
LT
2185 do {
2186 skb = list; list = list->next;
8e87d142 2187
1da177e4 2188 skb->dev = (void *) hdev;
0d48d939 2189 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2190 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2191
2192 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2193
73d80deb 2194 __skb_queue_tail(queue, skb);
1da177e4
LT
2195 } while (list);
2196
af3e6359 2197 spin_unlock(&queue->lock);
1da177e4 2198 }
73d80deb
LAD
2199}
2200
/* Queue ACL data on a channel and kick the TX work to send it. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2214
/* Send SCO data */
/* Prepend a SCO header, queue the skb on the connection's data queue
 * and kick the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2236
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * in-flight packets (fairness), and compute its send quota from the
 * controller's free buffer count for that link type.
 *
 * Returns the chosen connection (or NULL) and sets *quote to the
 * number of packets it may send (at least 1 when chosen, 0 otherwise).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type have been seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* No dedicated LE buffers: share the ACL pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the free buffers evenly; guarantee at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2299
6039aa73 2300static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2301{
2302 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2303 struct hci_conn *c;
1da177e4 2304
bae1f5d9 2305 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2306
bf4c6325
GP
2307 rcu_read_lock();
2308
1da177e4 2309 /* Kill stalled connections */
bf4c6325 2310 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2311 if (c->type == type && c->sent) {
2312 BT_ERR("%s killing stalled connection %s",
a8c5fb1a 2313 hdev->name, batostr(&c->dst));
7490c6c2 2314 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2315 }
2316 }
bf4c6325
GP
2317
2318 rcu_read_unlock();
1da177e4
LT
2319}
2320
6039aa73
GP
2321static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2322 int *quote)
1da177e4 2323{
73d80deb
LAD
2324 struct hci_conn_hash *h = &hdev->conn_hash;
2325 struct hci_chan *chan = NULL;
abc5de8f 2326 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2327 struct hci_conn *conn;
73d80deb
LAD
2328 int cnt, q, conn_num = 0;
2329
2330 BT_DBG("%s", hdev->name);
2331
bf4c6325
GP
2332 rcu_read_lock();
2333
2334 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2335 struct hci_chan *tmp;
2336
2337 if (conn->type != type)
2338 continue;
2339
2340 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2341 continue;
2342
2343 conn_num++;
2344
8192edef 2345 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2346 struct sk_buff *skb;
2347
2348 if (skb_queue_empty(&tmp->data_q))
2349 continue;
2350
2351 skb = skb_peek(&tmp->data_q);
2352 if (skb->priority < cur_prio)
2353 continue;
2354
2355 if (skb->priority > cur_prio) {
2356 num = 0;
2357 min = ~0;
2358 cur_prio = skb->priority;
2359 }
2360
2361 num++;
2362
2363 if (conn->sent < min) {
2364 min = conn->sent;
2365 chan = tmp;
2366 }
2367 }
2368
2369 if (hci_conn_num(hdev, type) == conn_num)
2370 break;
2371 }
2372
bf4c6325
GP
2373 rcu_read_unlock();
2374
73d80deb
LAD
2375 if (!chan)
2376 return NULL;
2377
2378 switch (chan->conn->type) {
2379 case ACL_LINK:
2380 cnt = hdev->acl_cnt;
2381 break;
2382 case SCO_LINK:
2383 case ESCO_LINK:
2384 cnt = hdev->sco_cnt;
2385 break;
2386 case LE_LINK:
2387 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2388 break;
2389 default:
2390 cnt = 0;
2391 BT_ERR("Unknown link type");
2392 }
2393
2394 q = cnt / num;
2395 *quote = q ? q : 1;
2396 BT_DBG("chan %p quote %d", chan, *quote);
2397 return chan;
2398}
2399
02b20f0b
LAD
2400static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2401{
2402 struct hci_conn_hash *h = &hdev->conn_hash;
2403 struct hci_conn *conn;
2404 int num = 0;
2405
2406 BT_DBG("%s", hdev->name);
2407
bf4c6325
GP
2408 rcu_read_lock();
2409
2410 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2411 struct hci_chan *chan;
2412
2413 if (conn->type != type)
2414 continue;
2415
2416 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2417 continue;
2418
2419 num++;
2420
8192edef 2421 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2422 struct sk_buff *skb;
2423
2424 if (chan->sent) {
2425 chan->sent = 0;
2426 continue;
2427 }
2428
2429 if (skb_queue_empty(&chan->data_q))
2430 continue;
2431
2432 skb = skb_peek(&chan->data_q);
2433 if (skb->priority >= HCI_PRIO_MAX - 1)
2434 continue;
2435
2436 skb->priority = HCI_PRIO_MAX - 1;
2437
2438 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 2439 skb->priority);
02b20f0b
LAD
2440 }
2441
2442 if (hci_conn_num(hdev, type) == num)
2443 break;
2444 }
bf4c6325
GP
2445
2446 rcu_read_unlock();
2447
02b20f0b
LAD
2448}
2449
b71d385a
AE
2450static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2451{
2452 /* Calculate count of blocks used by this packet */
2453 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2454}
2455
6039aa73 2456static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2457{
1da177e4
LT
2458 if (!test_bit(HCI_RAW, &hdev->flags)) {
2459 /* ACL tx timeout must be longer than maximum
2460 * link supervision timeout (40.9 seconds) */
63d2bc1b 2461 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2462 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2463 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2464 }
63d2bc1b 2465}
1da177e4 2466
6039aa73 2467static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2468{
2469 unsigned int cnt = hdev->acl_cnt;
2470 struct hci_chan *chan;
2471 struct sk_buff *skb;
2472 int quote;
2473
2474 __check_timeout(hdev, cnt);
04837f64 2475
73d80deb 2476 while (hdev->acl_cnt &&
a8c5fb1a 2477 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2478 u32 priority = (skb_peek(&chan->data_q))->priority;
2479 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2480 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2481 skb->len, skb->priority);
73d80deb 2482
ec1cce24
LAD
2483 /* Stop if priority has changed */
2484 if (skb->priority < priority)
2485 break;
2486
2487 skb = skb_dequeue(&chan->data_q);
2488
73d80deb 2489 hci_conn_enter_active_mode(chan->conn,
04124681 2490 bt_cb(skb)->force_active);
04837f64 2491
1da177e4
LT
2492 hci_send_frame(skb);
2493 hdev->acl_last_tx = jiffies;
2494
2495 hdev->acl_cnt--;
73d80deb
LAD
2496 chan->sent++;
2497 chan->conn->sent++;
1da177e4
LT
2498 }
2499 }
02b20f0b
LAD
2500
2501 if (cnt != hdev->acl_cnt)
2502 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2503}
2504
6039aa73 2505static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2506{
63d2bc1b 2507 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2508 struct hci_chan *chan;
2509 struct sk_buff *skb;
2510 int quote;
b71d385a 2511
63d2bc1b 2512 __check_timeout(hdev, cnt);
b71d385a
AE
2513
2514 while (hdev->block_cnt > 0 &&
a8c5fb1a 2515 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
b71d385a
AE
2516 u32 priority = (skb_peek(&chan->data_q))->priority;
2517 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2518 int blocks;
2519
2520 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2521 skb->len, skb->priority);
b71d385a
AE
2522
2523 /* Stop if priority has changed */
2524 if (skb->priority < priority)
2525 break;
2526
2527 skb = skb_dequeue(&chan->data_q);
2528
2529 blocks = __get_blocks(hdev, skb);
2530 if (blocks > hdev->block_cnt)
2531 return;
2532
2533 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2534 bt_cb(skb)->force_active);
b71d385a
AE
2535
2536 hci_send_frame(skb);
2537 hdev->acl_last_tx = jiffies;
2538
2539 hdev->block_cnt -= blocks;
2540 quote -= blocks;
2541
2542 chan->sent += blocks;
2543 chan->conn->sent += blocks;
2544 }
2545 }
2546
2547 if (cnt != hdev->block_cnt)
2548 hci_prio_recalculate(hdev, ACL_LINK);
2549}
2550
6039aa73 2551static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2552{
2553 BT_DBG("%s", hdev->name);
2554
2555 if (!hci_conn_num(hdev, ACL_LINK))
2556 return;
2557
2558 switch (hdev->flow_ctl_mode) {
2559 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2560 hci_sched_acl_pkt(hdev);
2561 break;
2562
2563 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2564 hci_sched_acl_blk(hdev);
2565 break;
2566 }
2567}
2568
1da177e4 2569/* Schedule SCO */
6039aa73 2570static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
2571{
2572 struct hci_conn *conn;
2573 struct sk_buff *skb;
2574 int quote;
2575
2576 BT_DBG("%s", hdev->name);
2577
52087a79
LAD
2578 if (!hci_conn_num(hdev, SCO_LINK))
2579 return;
2580
1da177e4
LT
2581 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2582 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2583 BT_DBG("skb %p len %d", skb, skb->len);
2584 hci_send_frame(skb);
2585
2586 conn->sent++;
2587 if (conn->sent == ~0)
2588 conn->sent = 0;
2589 }
2590 }
2591}
2592
6039aa73 2593static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2594{
2595 struct hci_conn *conn;
2596 struct sk_buff *skb;
2597 int quote;
2598
2599 BT_DBG("%s", hdev->name);
2600
52087a79
LAD
2601 if (!hci_conn_num(hdev, ESCO_LINK))
2602 return;
2603
8fc9ced3
GP
2604 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2605 &quote))) {
b6a0dc82
MH
2606 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2607 BT_DBG("skb %p len %d", skb, skb->len);
2608 hci_send_frame(skb);
2609
2610 conn->sent++;
2611 if (conn->sent == ~0)
2612 conn->sent = 0;
2613 }
2614 }
2615}
2616
6039aa73 2617static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 2618{
73d80deb 2619 struct hci_chan *chan;
6ed58ec5 2620 struct sk_buff *skb;
02b20f0b 2621 int quote, cnt, tmp;
6ed58ec5
VT
2622
2623 BT_DBG("%s", hdev->name);
2624
52087a79
LAD
2625 if (!hci_conn_num(hdev, LE_LINK))
2626 return;
2627
6ed58ec5
VT
2628 if (!test_bit(HCI_RAW, &hdev->flags)) {
2629 /* LE tx timeout must be longer than maximum
2630 * link supervision timeout (40.9 seconds) */
bae1f5d9 2631 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 2632 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 2633 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
2634 }
2635
2636 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 2637 tmp = cnt;
73d80deb 2638 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
2639 u32 priority = (skb_peek(&chan->data_q))->priority;
2640 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2641 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2642 skb->len, skb->priority);
6ed58ec5 2643
ec1cce24
LAD
2644 /* Stop if priority has changed */
2645 if (skb->priority < priority)
2646 break;
2647
2648 skb = skb_dequeue(&chan->data_q);
2649
6ed58ec5
VT
2650 hci_send_frame(skb);
2651 hdev->le_last_tx = jiffies;
2652
2653 cnt--;
73d80deb
LAD
2654 chan->sent++;
2655 chan->conn->sent++;
6ed58ec5
VT
2656 }
2657 }
73d80deb 2658
6ed58ec5
VT
2659 if (hdev->le_pkts)
2660 hdev->le_cnt = cnt;
2661 else
2662 hdev->acl_cnt = cnt;
02b20f0b
LAD
2663
2664 if (cnt != tmp)
2665 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
2666}
2667
3eff45ea 2668static void hci_tx_work(struct work_struct *work)
1da177e4 2669{
3eff45ea 2670 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2671 struct sk_buff *skb;
2672
6ed58ec5 2673 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 2674 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2675
2676 /* Schedule queues and send stuff to HCI driver */
2677
2678 hci_sched_acl(hdev);
2679
2680 hci_sched_sco(hdev);
2681
b6a0dc82
MH
2682 hci_sched_esco(hdev);
2683
6ed58ec5
VT
2684 hci_sched_le(hdev);
2685
1da177e4
LT
2686 /* Send next queued raw (unknown type) packet */
2687 while ((skb = skb_dequeue(&hdev->raw_q)))
2688 hci_send_frame(skb);
1da177e4
LT
2689}
2690
25985edc 2691/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2692
2693/* ACL data packet */
6039aa73 2694static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2695{
2696 struct hci_acl_hdr *hdr = (void *) skb->data;
2697 struct hci_conn *conn;
2698 __u16 handle, flags;
2699
2700 skb_pull(skb, HCI_ACL_HDR_SIZE);
2701
2702 handle = __le16_to_cpu(hdr->handle);
2703 flags = hci_flags(handle);
2704 handle = hci_handle(handle);
2705
f0e09510 2706 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 2707 handle, flags);
1da177e4
LT
2708
2709 hdev->stat.acl_rx++;
2710
2711 hci_dev_lock(hdev);
2712 conn = hci_conn_hash_lookup_handle(hdev, handle);
2713 hci_dev_unlock(hdev);
8e87d142 2714
1da177e4 2715 if (conn) {
65983fc7 2716 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2717
671267bf
JH
2718 hci_dev_lock(hdev);
2719 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2720 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2721 mgmt_device_connected(hdev, &conn->dst, conn->type,
2722 conn->dst_type, 0, NULL, 0,
2723 conn->dev_class);
2724 hci_dev_unlock(hdev);
2725
1da177e4 2726 /* Send to upper protocol */
686ebf28
UF
2727 l2cap_recv_acldata(conn, skb, flags);
2728 return;
1da177e4 2729 } else {
8e87d142 2730 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 2731 hdev->name, handle);
1da177e4
LT
2732 }
2733
2734 kfree_skb(skb);
2735}
2736
2737/* SCO data packet */
6039aa73 2738static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2739{
2740 struct hci_sco_hdr *hdr = (void *) skb->data;
2741 struct hci_conn *conn;
2742 __u16 handle;
2743
2744 skb_pull(skb, HCI_SCO_HDR_SIZE);
2745
2746 handle = __le16_to_cpu(hdr->handle);
2747
f0e09510 2748 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
2749
2750 hdev->stat.sco_rx++;
2751
2752 hci_dev_lock(hdev);
2753 conn = hci_conn_hash_lookup_handle(hdev, handle);
2754 hci_dev_unlock(hdev);
2755
2756 if (conn) {
1da177e4 2757 /* Send to upper protocol */
686ebf28
UF
2758 sco_recv_scodata(conn, skb);
2759 return;
1da177e4 2760 } else {
8e87d142 2761 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2762 hdev->name, handle);
1da177e4
LT
2763 }
2764
2765 kfree_skb(skb);
2766}
2767
b78752cc 2768static void hci_rx_work(struct work_struct *work)
1da177e4 2769{
b78752cc 2770 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2771 struct sk_buff *skb;
2772
2773 BT_DBG("%s", hdev->name);
2774
1da177e4 2775 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
2776 /* Send copy to monitor */
2777 hci_send_to_monitor(hdev, skb);
2778
1da177e4
LT
2779 if (atomic_read(&hdev->promisc)) {
2780 /* Send copy to the sockets */
470fe1b5 2781 hci_send_to_sock(hdev, skb);
1da177e4
LT
2782 }
2783
2784 if (test_bit(HCI_RAW, &hdev->flags)) {
2785 kfree_skb(skb);
2786 continue;
2787 }
2788
2789 if (test_bit(HCI_INIT, &hdev->flags)) {
2790 /* Don't process data packets in this states. */
0d48d939 2791 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2792 case HCI_ACLDATA_PKT:
2793 case HCI_SCODATA_PKT:
2794 kfree_skb(skb);
2795 continue;
3ff50b79 2796 }
1da177e4
LT
2797 }
2798
2799 /* Process frame */
0d48d939 2800 switch (bt_cb(skb)->pkt_type) {
1da177e4 2801 case HCI_EVENT_PKT:
b78752cc 2802 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2803 hci_event_packet(hdev, skb);
2804 break;
2805
2806 case HCI_ACLDATA_PKT:
2807 BT_DBG("%s ACL data packet", hdev->name);
2808 hci_acldata_packet(hdev, skb);
2809 break;
2810
2811 case HCI_SCODATA_PKT:
2812 BT_DBG("%s SCO data packet", hdev->name);
2813 hci_scodata_packet(hdev, skb);
2814 break;
2815
2816 default:
2817 kfree_skb(skb);
2818 break;
2819 }
2820 }
1da177e4
LT
2821}
2822
c347b765 2823static void hci_cmd_work(struct work_struct *work)
1da177e4 2824{
c347b765 2825 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2826 struct sk_buff *skb;
2827
2104786b
AE
2828 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2829 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 2830
1da177e4 2831 /* Send queued commands */
5a08ecce
AE
2832 if (atomic_read(&hdev->cmd_cnt)) {
2833 skb = skb_dequeue(&hdev->cmd_q);
2834 if (!skb)
2835 return;
2836
7585b97a 2837 kfree_skb(hdev->sent_cmd);
1da177e4 2838
70f23020
AE
2839 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2840 if (hdev->sent_cmd) {
1da177e4
LT
2841 atomic_dec(&hdev->cmd_cnt);
2842 hci_send_frame(skb);
7bdb8a5c
SJ
2843 if (test_bit(HCI_RESET, &hdev->flags))
2844 del_timer(&hdev->cmd_timer);
2845 else
2846 mod_timer(&hdev->cmd_timer,
5f246e89 2847 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
2848 } else {
2849 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2850 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2851 }
2852 }
2853}
2519a1fc
AG
2854
2855int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2856{
2857 /* General inquiry access code (GIAC) */
2858 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2859 struct hci_cp_inquiry cp;
2860
2861 BT_DBG("%s", hdev->name);
2862
2863 if (test_bit(HCI_INQUIRY, &hdev->flags))
2864 return -EINPROGRESS;
2865
4663262c
JH
2866 inquiry_cache_flush(hdev);
2867
2519a1fc
AG
2868 memset(&cp, 0, sizeof(cp));
2869 memcpy(&cp.lap, lap, sizeof(cp.lap));
2870 cp.length = length;
2871
2872 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2873}
023d5049
AG
2874
2875int hci_cancel_inquiry(struct hci_dev *hdev)
2876{
2877 BT_DBG("%s", hdev->name);
2878
2879 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2880 return -EALREADY;
023d5049
AG
2881
2882 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2883}
31f7956c
AG
2884
2885u8 bdaddr_to_le(u8 bdaddr_type)
2886{
2887 switch (bdaddr_type) {
2888 case BDADDR_LE_PUBLIC:
2889 return ADDR_LE_DEV_PUBLIC;
2890
2891 default:
2892 /* Fallback to LE Random address type */
2893 return ADDR_LE_DEV_RANDOM;
2894 }
2895}