Bluetooth: Fix coding style in include/net/bluetooth
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4 42#include <linux/interrupt.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
70f23020 48#include <linux/uaccess.h>
1da177e4
LT
49#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
/* Delay (ms) before powering an auto-powered controller back off */
#define AUTO_OFF_TIMEOUT	2000

/* Per-device work handlers, defined later in this file */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
67
1da177e4
LT
68/* ---- HCI notifications ---- */
69
/* ---- HCI notifications ---- */

/* Forward a device event (up/down/register/...) to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
23bb5763 77void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 78{
23bb5763
JH
79 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
80
a5040efa
JH
81 /* If this is the init phase check if the completed command matches
82 * the last init command, and if not just return.
83 */
75fb0e32
JH
84 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1036b890 86 u16 opcode = __le16_to_cpu(sent->opcode);
75fb0e32
JH
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
1036b890 96 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
75fb0e32
JH
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
23bb5763 105 return;
75fb0e32 106 }
1da177e4
LT
107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
a8c5fb1a
GP
127static int __hci_request(struct hci_dev *hdev,
128 void (*req)(struct hci_dev *hdev, unsigned long opt),
129 unsigned long opt, __u32 timeout)
1da177e4
LT
130{
131 DECLARE_WAITQUEUE(wait, current);
132 int err = 0;
133
134 BT_DBG("%s start", hdev->name);
135
136 hdev->req_status = HCI_REQ_PEND;
137
138 add_wait_queue(&hdev->req_wait_q, &wait);
139 set_current_state(TASK_INTERRUPTIBLE);
140
141 req(hdev, opt);
142 schedule_timeout(timeout);
143
144 remove_wait_queue(&hdev->req_wait_q, &wait);
145
146 if (signal_pending(current))
147 return -EINTR;
148
149 switch (hdev->req_status) {
150 case HCI_REQ_DONE:
e175072f 151 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
152 break;
153
154 case HCI_REQ_CANCELED:
155 err = -hdev->req_result;
156 break;
157
158 default:
159 err = -ETIMEDOUT;
160 break;
3ff50b79 161 }
1da177e4 162
a5040efa 163 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
164
165 BT_DBG("%s end: err %d", hdev->name, err);
166
167 return err;
168}
169
6039aa73
GP
170static int hci_request(struct hci_dev *hdev,
171 void (*req)(struct hci_dev *hdev, unsigned long opt),
172 unsigned long opt, __u32 timeout)
1da177e4
LT
173{
174 int ret;
175
7c6a329e
MH
176 if (!test_bit(HCI_UP, &hdev->flags))
177 return -ENETDOWN;
178
1da177e4
LT
179 /* Serialize all requests */
180 hci_req_lock(hdev);
181 ret = __hci_request(hdev, req, opt, timeout);
182 hci_req_unlock(hdev);
183
184 return ret;
185}
186
187static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
188{
189 BT_DBG("%s %ld", hdev->name, opt);
190
191 /* Reset device */
f630cf0d 192 set_bit(HCI_RESET, &hdev->flags);
a9de9248 193 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
194}
195
e61ef499 196static void bredr_init(struct hci_dev *hdev)
1da177e4 197{
b0916ea0 198 struct hci_cp_delete_stored_link_key cp;
1ebb9252 199 __le16 param;
89f2783d 200 __u8 flt_type;
1da177e4 201
2455a3ea
AE
202 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
203
1da177e4
LT
204 /* Mandatory initialization */
205
206 /* Reset */
a6c511c6 207 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
e61ef499
AE
208 set_bit(HCI_RESET, &hdev->flags);
209 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 210 }
1da177e4
LT
211
212 /* Read Local Supported Features */
a9de9248 213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 214
1143e5a6 215 /* Read Local Version */
a9de9248 216 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 217
1da177e4 218 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 219 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 220
1da177e4 221 /* Read BD Address */
a9de9248
MH
222 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
223
224 /* Read Class of Device */
225 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
226
227 /* Read Local Name */
228 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
229
230 /* Read Voice Setting */
a9de9248 231 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
232
233 /* Optional initialization */
234
235 /* Clear Event Filters */
89f2783d 236 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 237 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 238
1da177e4 239 /* Connection accept timeout ~20 secs */
aca3192c 240 param = cpu_to_le16(0x7d00);
a9de9248 241 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
242
243 bacpy(&cp.bdaddr, BDADDR_ANY);
244 cp.delete_all = 1;
245 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
246}
247
e61ef499
AE
248static void amp_init(struct hci_dev *hdev)
249{
2455a3ea
AE
250 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
251
e61ef499
AE
252 /* Reset */
253 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
254
255 /* Read Local Version */
256 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
257
258 /* Read Local AMP Info */
259 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e61ef499
AE
260}
261
262static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
263{
264 struct sk_buff *skb;
265
266 BT_DBG("%s %ld", hdev->name, opt);
267
268 /* Driver initialization */
269
270 /* Special commands */
271 while ((skb = skb_dequeue(&hdev->driver_init))) {
272 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
273 skb->dev = (void *) hdev;
274
275 skb_queue_tail(&hdev->cmd_q, skb);
276 queue_work(hdev->workqueue, &hdev->cmd_work);
277 }
278 skb_queue_purge(&hdev->driver_init);
279
280 switch (hdev->dev_type) {
281 case HCI_BREDR:
282 bredr_init(hdev);
283 break;
284
285 case HCI_AMP:
286 amp_init(hdev);
287 break;
288
289 default:
290 BT_ERR("Unknown device type %d", hdev->dev_type);
291 break;
292 }
293
294}
295
6ed58ec5
VT
296static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
297{
298 BT_DBG("%s", hdev->name);
299
300 /* Read LE buffer size */
301 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
302}
303
1da177e4
LT
304static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
305{
306 __u8 scan = opt;
307
308 BT_DBG("%s %x", hdev->name, scan);
309
310 /* Inquiry and Page scans */
a9de9248 311 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
312}
313
314static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
315{
316 __u8 auth = opt;
317
318 BT_DBG("%s %x", hdev->name, auth);
319
320 /* Authentication */
a9de9248 321 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
322}
323
324static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
325{
326 __u8 encrypt = opt;
327
328 BT_DBG("%s %x", hdev->name, encrypt);
329
e4e8e37c 330 /* Encryption */
a9de9248 331 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
332}
333
e4e8e37c
MH
334static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
335{
336 __le16 policy = cpu_to_le16(opt);
337
a418b893 338 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
339
340 /* Default link policy */
341 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
342}
343
8e87d142 344/* Get HCI device by index.
1da177e4
LT
345 * Device is held on return. */
346struct hci_dev *hci_dev_get(int index)
347{
8035ded4 348 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
349
350 BT_DBG("%d", index);
351
352 if (index < 0)
353 return NULL;
354
355 read_lock(&hci_dev_list_lock);
8035ded4 356 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
357 if (d->id == index) {
358 hdev = hci_dev_hold(d);
359 break;
360 }
361 }
362 read_unlock(&hci_dev_list_lock);
363 return hdev;
364}
1da177e4
LT
365
366/* ---- Inquiry support ---- */
ff9ef578 367
30dc78e1
JH
368bool hci_discovery_active(struct hci_dev *hdev)
369{
370 struct discovery_state *discov = &hdev->discovery;
371
6fbe195d 372 switch (discov->state) {
343f935b 373 case DISCOVERY_FINDING:
6fbe195d 374 case DISCOVERY_RESOLVING:
30dc78e1
JH
375 return true;
376
6fbe195d
AG
377 default:
378 return false;
379 }
30dc78e1
JH
380}
381
ff9ef578
JH
382void hci_discovery_set_state(struct hci_dev *hdev, int state)
383{
384 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
385
386 if (hdev->discovery.state == state)
387 return;
388
389 switch (state) {
390 case DISCOVERY_STOPPED:
7b99b659
AG
391 if (hdev->discovery.state != DISCOVERY_STARTING)
392 mgmt_discovering(hdev, 0);
ff9ef578
JH
393 break;
394 case DISCOVERY_STARTING:
395 break;
343f935b 396 case DISCOVERY_FINDING:
ff9ef578
JH
397 mgmt_discovering(hdev, 1);
398 break;
30dc78e1
JH
399 case DISCOVERY_RESOLVING:
400 break;
ff9ef578
JH
401 case DISCOVERY_STOPPING:
402 break;
403 }
404
405 hdev->discovery.state = state;
406}
407
1da177e4
LT
408static void inquiry_cache_flush(struct hci_dev *hdev)
409{
30883512 410 struct discovery_state *cache = &hdev->discovery;
b57c1a56 411 struct inquiry_entry *p, *n;
1da177e4 412
561aafbc
JH
413 list_for_each_entry_safe(p, n, &cache->all, all) {
414 list_del(&p->all);
b57c1a56 415 kfree(p);
1da177e4 416 }
561aafbc
JH
417
418 INIT_LIST_HEAD(&cache->unknown);
419 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
420}
421
a8c5fb1a
GP
422struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
423 bdaddr_t *bdaddr)
1da177e4 424{
30883512 425 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
426 struct inquiry_entry *e;
427
428 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
429
561aafbc
JH
430 list_for_each_entry(e, &cache->all, all) {
431 if (!bacmp(&e->data.bdaddr, bdaddr))
432 return e;
433 }
434
435 return NULL;
436}
437
438struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 439 bdaddr_t *bdaddr)
561aafbc 440{
30883512 441 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
442 struct inquiry_entry *e;
443
444 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
445
446 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 447 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
448 return e;
449 }
450
451 return NULL;
1da177e4
LT
452}
453
30dc78e1 454struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
455 bdaddr_t *bdaddr,
456 int state)
30dc78e1
JH
457{
458 struct discovery_state *cache = &hdev->discovery;
459 struct inquiry_entry *e;
460
461 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
462
463 list_for_each_entry(e, &cache->resolve, list) {
464 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
465 return e;
466 if (!bacmp(&e->data.bdaddr, bdaddr))
467 return e;
468 }
469
470 return NULL;
471}
472
a3d4e20a 473void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 474 struct inquiry_entry *ie)
a3d4e20a
JH
475{
476 struct discovery_state *cache = &hdev->discovery;
477 struct list_head *pos = &cache->resolve;
478 struct inquiry_entry *p;
479
480 list_del(&ie->list);
481
482 list_for_each_entry(p, &cache->resolve, list) {
483 if (p->name_state != NAME_PENDING &&
a8c5fb1a 484 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
485 break;
486 pos = &p->list;
487 }
488
489 list_add(&ie->list, pos);
490}
491
3175405b 492bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 493 bool name_known, bool *ssp)
1da177e4 494{
30883512 495 struct discovery_state *cache = &hdev->discovery;
70f23020 496 struct inquiry_entry *ie;
1da177e4
LT
497
498 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
499
388fc8fa
JH
500 if (ssp)
501 *ssp = data->ssp_mode;
502
70f23020 503 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 504 if (ie) {
388fc8fa
JH
505 if (ie->data.ssp_mode && ssp)
506 *ssp = true;
507
a3d4e20a 508 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 509 data->rssi != ie->data.rssi) {
a3d4e20a
JH
510 ie->data.rssi = data->rssi;
511 hci_inquiry_cache_update_resolve(hdev, ie);
512 }
513
561aafbc 514 goto update;
a3d4e20a 515 }
561aafbc
JH
516
517 /* Entry not in the cache. Add new one. */
518 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
519 if (!ie)
3175405b 520 return false;
561aafbc
JH
521
522 list_add(&ie->all, &cache->all);
523
524 if (name_known) {
525 ie->name_state = NAME_KNOWN;
526 } else {
527 ie->name_state = NAME_NOT_KNOWN;
528 list_add(&ie->list, &cache->unknown);
529 }
70f23020 530
561aafbc
JH
531update:
532 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 533 ie->name_state != NAME_PENDING) {
561aafbc
JH
534 ie->name_state = NAME_KNOWN;
535 list_del(&ie->list);
1da177e4
LT
536 }
537
70f23020
AE
538 memcpy(&ie->data, data, sizeof(*data));
539 ie->timestamp = jiffies;
1da177e4 540 cache->timestamp = jiffies;
3175405b
JH
541
542 if (ie->name_state == NAME_NOT_KNOWN)
543 return false;
544
545 return true;
1da177e4
LT
546}
547
548static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
549{
30883512 550 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
551 struct inquiry_info *info = (struct inquiry_info *) buf;
552 struct inquiry_entry *e;
553 int copied = 0;
554
561aafbc 555 list_for_each_entry(e, &cache->all, all) {
1da177e4 556 struct inquiry_data *data = &e->data;
b57c1a56
JH
557
558 if (copied >= num)
559 break;
560
1da177e4
LT
561 bacpy(&info->bdaddr, &data->bdaddr);
562 info->pscan_rep_mode = data->pscan_rep_mode;
563 info->pscan_period_mode = data->pscan_period_mode;
564 info->pscan_mode = data->pscan_mode;
565 memcpy(info->dev_class, data->dev_class, 3);
566 info->clock_offset = data->clock_offset;
b57c1a56 567
1da177e4 568 info++;
b57c1a56 569 copied++;
1da177e4
LT
570 }
571
572 BT_DBG("cache %p, copied %d", cache, copied);
573 return copied;
574}
575
576static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
577{
578 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
579 struct hci_cp_inquiry cp;
580
581 BT_DBG("%s", hdev->name);
582
583 if (test_bit(HCI_INQUIRY, &hdev->flags))
584 return;
585
586 /* Start Inquiry */
587 memcpy(&cp.lap, &ir->lap, 3);
588 cp.length = ir->length;
589 cp.num_rsp = ir->num_rsp;
a9de9248 590 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
591}
592
593int hci_inquiry(void __user *arg)
594{
595 __u8 __user *ptr = arg;
596 struct hci_inquiry_req ir;
597 struct hci_dev *hdev;
598 int err = 0, do_inquiry = 0, max_rsp;
599 long timeo;
600 __u8 *buf;
601
602 if (copy_from_user(&ir, ptr, sizeof(ir)))
603 return -EFAULT;
604
5a08ecce
AE
605 hdev = hci_dev_get(ir.dev_id);
606 if (!hdev)
1da177e4
LT
607 return -ENODEV;
608
09fd0de5 609 hci_dev_lock(hdev);
8e87d142 610 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 611 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
612 inquiry_cache_flush(hdev);
613 do_inquiry = 1;
614 }
09fd0de5 615 hci_dev_unlock(hdev);
1da177e4 616
04837f64 617 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
618
619 if (do_inquiry) {
620 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
621 if (err < 0)
622 goto done;
623 }
1da177e4
LT
624
625 /* for unlimited number of responses we will use buffer with 255 entries */
626 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
627
628 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
629 * copy it to the user space.
630 */
01df8c31 631 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 632 if (!buf) {
1da177e4
LT
633 err = -ENOMEM;
634 goto done;
635 }
636
09fd0de5 637 hci_dev_lock(hdev);
1da177e4 638 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 639 hci_dev_unlock(hdev);
1da177e4
LT
640
641 BT_DBG("num_rsp %d", ir.num_rsp);
642
643 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
644 ptr += sizeof(ir);
645 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 646 ir.num_rsp))
1da177e4 647 err = -EFAULT;
8e87d142 648 } else
1da177e4
LT
649 err = -EFAULT;
650
651 kfree(buf);
652
653done:
654 hci_dev_put(hdev);
655 return err;
656}
657
658/* ---- HCI ioctl helpers ---- */
659
660int hci_dev_open(__u16 dev)
661{
662 struct hci_dev *hdev;
663 int ret = 0;
664
5a08ecce
AE
665 hdev = hci_dev_get(dev);
666 if (!hdev)
1da177e4
LT
667 return -ENODEV;
668
669 BT_DBG("%s %p", hdev->name, hdev);
670
671 hci_req_lock(hdev);
672
94324962
JH
673 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
674 ret = -ENODEV;
675 goto done;
676 }
677
611b30f7
MH
678 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
679 ret = -ERFKILL;
680 goto done;
681 }
682
1da177e4
LT
683 if (test_bit(HCI_UP, &hdev->flags)) {
684 ret = -EALREADY;
685 goto done;
686 }
687
688 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
689 set_bit(HCI_RAW, &hdev->flags);
690
07e3b94a
AE
691 /* Treat all non BR/EDR controllers as raw devices if
692 enable_hs is not set */
693 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
694 set_bit(HCI_RAW, &hdev->flags);
695
1da177e4
LT
696 if (hdev->open(hdev)) {
697 ret = -EIO;
698 goto done;
699 }
700
701 if (!test_bit(HCI_RAW, &hdev->flags)) {
702 atomic_set(&hdev->cmd_cnt, 1);
703 set_bit(HCI_INIT, &hdev->flags);
a5040efa 704 hdev->init_last_cmd = 0;
1da177e4 705
04837f64 706 ret = __hci_request(hdev, hci_init_req, 0,
a8c5fb1a 707 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 708
eead27da 709 if (lmp_host_le_capable(hdev))
6ed58ec5 710 ret = __hci_request(hdev, hci_le_init_req, 0,
a8c5fb1a 711 msecs_to_jiffies(HCI_INIT_TIMEOUT));
6ed58ec5 712
1da177e4
LT
713 clear_bit(HCI_INIT, &hdev->flags);
714 }
715
716 if (!ret) {
717 hci_dev_hold(hdev);
718 set_bit(HCI_UP, &hdev->flags);
719 hci_notify(hdev, HCI_DEV_UP);
a8b2d5c2 720 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 721 hci_dev_lock(hdev);
744cf19e 722 mgmt_powered(hdev, 1);
09fd0de5 723 hci_dev_unlock(hdev);
56e5cb86 724 }
8e87d142 725 } else {
1da177e4 726 /* Init failed, cleanup */
3eff45ea 727 flush_work(&hdev->tx_work);
c347b765 728 flush_work(&hdev->cmd_work);
b78752cc 729 flush_work(&hdev->rx_work);
1da177e4
LT
730
731 skb_queue_purge(&hdev->cmd_q);
732 skb_queue_purge(&hdev->rx_q);
733
734 if (hdev->flush)
735 hdev->flush(hdev);
736
737 if (hdev->sent_cmd) {
738 kfree_skb(hdev->sent_cmd);
739 hdev->sent_cmd = NULL;
740 }
741
742 hdev->close(hdev);
743 hdev->flags = 0;
744 }
745
746done:
747 hci_req_unlock(hdev);
748 hci_dev_put(hdev);
749 return ret;
750}
751
752static int hci_dev_do_close(struct hci_dev *hdev)
753{
754 BT_DBG("%s %p", hdev->name, hdev);
755
28b75a89
AG
756 cancel_work_sync(&hdev->le_scan);
757
1da177e4
LT
758 hci_req_cancel(hdev, ENODEV);
759 hci_req_lock(hdev);
760
761 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 762 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
763 hci_req_unlock(hdev);
764 return 0;
765 }
766
3eff45ea
GP
767 /* Flush RX and TX works */
768 flush_work(&hdev->tx_work);
b78752cc 769 flush_work(&hdev->rx_work);
1da177e4 770
16ab91ab 771 if (hdev->discov_timeout > 0) {
e0f9309f 772 cancel_delayed_work(&hdev->discov_off);
16ab91ab 773 hdev->discov_timeout = 0;
5e5282bb 774 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
775 }
776
a8b2d5c2 777 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
778 cancel_delayed_work(&hdev->service_cache);
779
7ba8b4be
AG
780 cancel_delayed_work_sync(&hdev->le_scan_disable);
781
09fd0de5 782 hci_dev_lock(hdev);
1da177e4
LT
783 inquiry_cache_flush(hdev);
784 hci_conn_hash_flush(hdev);
09fd0de5 785 hci_dev_unlock(hdev);
1da177e4
LT
786
787 hci_notify(hdev, HCI_DEV_DOWN);
788
789 if (hdev->flush)
790 hdev->flush(hdev);
791
792 /* Reset device */
793 skb_queue_purge(&hdev->cmd_q);
794 atomic_set(&hdev->cmd_cnt, 1);
8af59467 795 if (!test_bit(HCI_RAW, &hdev->flags) &&
a6c511c6 796 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 797 set_bit(HCI_INIT, &hdev->flags);
04837f64 798 __hci_request(hdev, hci_reset_req, 0,
a8c5fb1a 799 msecs_to_jiffies(250));
1da177e4
LT
800 clear_bit(HCI_INIT, &hdev->flags);
801 }
802
c347b765
GP
803 /* flush cmd work */
804 flush_work(&hdev->cmd_work);
1da177e4
LT
805
806 /* Drop queues */
807 skb_queue_purge(&hdev->rx_q);
808 skb_queue_purge(&hdev->cmd_q);
809 skb_queue_purge(&hdev->raw_q);
810
811 /* Drop last sent command */
812 if (hdev->sent_cmd) {
b79f44c1 813 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
814 kfree_skb(hdev->sent_cmd);
815 hdev->sent_cmd = NULL;
816 }
817
818 /* After this point our queues are empty
819 * and no tasks are scheduled. */
820 hdev->close(hdev);
821
8ee56540
MH
822 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
823 hci_dev_lock(hdev);
824 mgmt_powered(hdev, 0);
825 hci_dev_unlock(hdev);
826 }
5add6af8 827
1da177e4
LT
828 /* Clear flags */
829 hdev->flags = 0;
830
e59fda8d 831 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 832 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 833
1da177e4
LT
834 hci_req_unlock(hdev);
835
836 hci_dev_put(hdev);
837 return 0;
838}
839
840int hci_dev_close(__u16 dev)
841{
842 struct hci_dev *hdev;
843 int err;
844
70f23020
AE
845 hdev = hci_dev_get(dev);
846 if (!hdev)
1da177e4 847 return -ENODEV;
8ee56540
MH
848
849 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
850 cancel_delayed_work(&hdev->power_off);
851
1da177e4 852 err = hci_dev_do_close(hdev);
8ee56540 853
1da177e4
LT
854 hci_dev_put(hdev);
855 return err;
856}
857
858int hci_dev_reset(__u16 dev)
859{
860 struct hci_dev *hdev;
861 int ret = 0;
862
70f23020
AE
863 hdev = hci_dev_get(dev);
864 if (!hdev)
1da177e4
LT
865 return -ENODEV;
866
867 hci_req_lock(hdev);
1da177e4
LT
868
869 if (!test_bit(HCI_UP, &hdev->flags))
870 goto done;
871
872 /* Drop queues */
873 skb_queue_purge(&hdev->rx_q);
874 skb_queue_purge(&hdev->cmd_q);
875
09fd0de5 876 hci_dev_lock(hdev);
1da177e4
LT
877 inquiry_cache_flush(hdev);
878 hci_conn_hash_flush(hdev);
09fd0de5 879 hci_dev_unlock(hdev);
1da177e4
LT
880
881 if (hdev->flush)
882 hdev->flush(hdev);
883
8e87d142 884 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 885 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
886
887 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64 888 ret = __hci_request(hdev, hci_reset_req, 0,
a8c5fb1a 889 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
890
891done:
1da177e4
LT
892 hci_req_unlock(hdev);
893 hci_dev_put(hdev);
894 return ret;
895}
896
897int hci_dev_reset_stat(__u16 dev)
898{
899 struct hci_dev *hdev;
900 int ret = 0;
901
70f23020
AE
902 hdev = hci_dev_get(dev);
903 if (!hdev)
1da177e4
LT
904 return -ENODEV;
905
906 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
907
908 hci_dev_put(hdev);
909
910 return ret;
911}
912
913int hci_dev_cmd(unsigned int cmd, void __user *arg)
914{
915 struct hci_dev *hdev;
916 struct hci_dev_req dr;
917 int err = 0;
918
919 if (copy_from_user(&dr, arg, sizeof(dr)))
920 return -EFAULT;
921
70f23020
AE
922 hdev = hci_dev_get(dr.dev_id);
923 if (!hdev)
1da177e4
LT
924 return -ENODEV;
925
926 switch (cmd) {
927 case HCISETAUTH:
04837f64 928 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
a8c5fb1a 929 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
930 break;
931
932 case HCISETENCRYPT:
933 if (!lmp_encrypt_capable(hdev)) {
934 err = -EOPNOTSUPP;
935 break;
936 }
937
938 if (!test_bit(HCI_AUTH, &hdev->flags)) {
939 /* Auth must be enabled first */
04837f64 940 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
a8c5fb1a 941 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
942 if (err)
943 break;
944 }
945
04837f64 946 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
a8c5fb1a 947 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
948 break;
949
950 case HCISETSCAN:
04837f64 951 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
a8c5fb1a 952 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
953 break;
954
1da177e4 955 case HCISETLINKPOL:
e4e8e37c 956 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
a8c5fb1a 957 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
958 break;
959
960 case HCISETLINKMODE:
e4e8e37c
MH
961 hdev->link_mode = ((__u16) dr.dev_opt) &
962 (HCI_LM_MASTER | HCI_LM_ACCEPT);
963 break;
964
965 case HCISETPTYPE:
966 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
967 break;
968
969 case HCISETACLMTU:
e4e8e37c
MH
970 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
971 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
972 break;
973
974 case HCISETSCOMTU:
e4e8e37c
MH
975 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
976 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
977 break;
978
979 default:
980 err = -EINVAL;
981 break;
982 }
e4e8e37c 983
1da177e4
LT
984 hci_dev_put(hdev);
985 return err;
986}
987
988int hci_get_dev_list(void __user *arg)
989{
8035ded4 990 struct hci_dev *hdev;
1da177e4
LT
991 struct hci_dev_list_req *dl;
992 struct hci_dev_req *dr;
1da177e4
LT
993 int n = 0, size, err;
994 __u16 dev_num;
995
996 if (get_user(dev_num, (__u16 __user *) arg))
997 return -EFAULT;
998
999 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1000 return -EINVAL;
1001
1002 size = sizeof(*dl) + dev_num * sizeof(*dr);
1003
70f23020
AE
1004 dl = kzalloc(size, GFP_KERNEL);
1005 if (!dl)
1da177e4
LT
1006 return -ENOMEM;
1007
1008 dr = dl->dev_req;
1009
f20d09d5 1010 read_lock(&hci_dev_list_lock);
8035ded4 1011 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1012 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1013 cancel_delayed_work(&hdev->power_off);
c542a06c 1014
a8b2d5c2
JH
1015 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1016 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1017
1da177e4
LT
1018 (dr + n)->dev_id = hdev->id;
1019 (dr + n)->dev_opt = hdev->flags;
c542a06c 1020
1da177e4
LT
1021 if (++n >= dev_num)
1022 break;
1023 }
f20d09d5 1024 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1025
1026 dl->dev_num = n;
1027 size = sizeof(*dl) + n * sizeof(*dr);
1028
1029 err = copy_to_user(arg, dl, size);
1030 kfree(dl);
1031
1032 return err ? -EFAULT : 0;
1033}
1034
1035int hci_get_dev_info(void __user *arg)
1036{
1037 struct hci_dev *hdev;
1038 struct hci_dev_info di;
1039 int err = 0;
1040
1041 if (copy_from_user(&di, arg, sizeof(di)))
1042 return -EFAULT;
1043
70f23020
AE
1044 hdev = hci_dev_get(di.dev_id);
1045 if (!hdev)
1da177e4
LT
1046 return -ENODEV;
1047
a8b2d5c2 1048 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1049 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1050
a8b2d5c2
JH
1051 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1052 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1053
1da177e4
LT
1054 strcpy(di.name, hdev->name);
1055 di.bdaddr = hdev->bdaddr;
943da25d 1056 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1057 di.flags = hdev->flags;
1058 di.pkt_type = hdev->pkt_type;
1059 di.acl_mtu = hdev->acl_mtu;
1060 di.acl_pkts = hdev->acl_pkts;
1061 di.sco_mtu = hdev->sco_mtu;
1062 di.sco_pkts = hdev->sco_pkts;
1063 di.link_policy = hdev->link_policy;
1064 di.link_mode = hdev->link_mode;
1065
1066 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1067 memcpy(&di.features, &hdev->features, sizeof(di.features));
1068
1069 if (copy_to_user(arg, &di, sizeof(di)))
1070 err = -EFAULT;
1071
1072 hci_dev_put(hdev);
1073
1074 return err;
1075}
1076
1077/* ---- Interface to HCI drivers ---- */
1078
611b30f7
MH
1079static int hci_rfkill_set_block(void *data, bool blocked)
1080{
1081 struct hci_dev *hdev = data;
1082
1083 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1084
1085 if (!blocked)
1086 return 0;
1087
1088 hci_dev_do_close(hdev);
1089
1090 return 0;
1091}
1092
1093static const struct rfkill_ops hci_rfkill_ops = {
1094 .set_block = hci_rfkill_set_block,
1095};
1096
ab81cbf9
JH
1097static void hci_power_on(struct work_struct *work)
1098{
1099 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1100
1101 BT_DBG("%s", hdev->name);
1102
1103 if (hci_dev_open(hdev->id) < 0)
1104 return;
1105
a8b2d5c2 1106 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
80b7ab33 1107 schedule_delayed_work(&hdev->power_off,
a8c5fb1a 1108 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9 1109
a8b2d5c2 1110 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1111 mgmt_index_added(hdev);
ab81cbf9
JH
1112}
1113
1114static void hci_power_off(struct work_struct *work)
1115{
3243553f 1116 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1117 power_off.work);
ab81cbf9
JH
1118
1119 BT_DBG("%s", hdev->name);
1120
8ee56540 1121 hci_dev_do_close(hdev);
ab81cbf9
JH
1122}
1123
16ab91ab
JH
1124static void hci_discov_off(struct work_struct *work)
1125{
1126 struct hci_dev *hdev;
1127 u8 scan = SCAN_PAGE;
1128
1129 hdev = container_of(work, struct hci_dev, discov_off.work);
1130
1131 BT_DBG("%s", hdev->name);
1132
09fd0de5 1133 hci_dev_lock(hdev);
16ab91ab
JH
1134
1135 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1136
1137 hdev->discov_timeout = 0;
1138
09fd0de5 1139 hci_dev_unlock(hdev);
16ab91ab
JH
1140}
1141
2aeb9a1a
JH
1142int hci_uuids_clear(struct hci_dev *hdev)
1143{
1144 struct list_head *p, *n;
1145
1146 list_for_each_safe(p, n, &hdev->uuids) {
1147 struct bt_uuid *uuid;
1148
1149 uuid = list_entry(p, struct bt_uuid, list);
1150
1151 list_del(p);
1152 kfree(uuid);
1153 }
1154
1155 return 0;
1156}
1157
55ed8ca1
JH
1158int hci_link_keys_clear(struct hci_dev *hdev)
1159{
1160 struct list_head *p, *n;
1161
1162 list_for_each_safe(p, n, &hdev->link_keys) {
1163 struct link_key *key;
1164
1165 key = list_entry(p, struct link_key, list);
1166
1167 list_del(p);
1168 kfree(key);
1169 }
1170
1171 return 0;
1172}
1173
b899efaf
VCG
1174int hci_smp_ltks_clear(struct hci_dev *hdev)
1175{
1176 struct smp_ltk *k, *tmp;
1177
1178 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1179 list_del(&k->list);
1180 kfree(k);
1181 }
1182
1183 return 0;
1184}
1185
55ed8ca1
JH
1186struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1187{
8035ded4 1188 struct link_key *k;
55ed8ca1 1189
8035ded4 1190 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1191 if (bacmp(bdaddr, &k->bdaddr) == 0)
1192 return k;
55ed8ca1
JH
1193
1194 return NULL;
1195}
1196
/* Decide whether a newly created link key should be stored persistently.
 *
 * @conn may be NULL (security mode 3 style pairing where no connection
 * context exists). @old_key_type is 0xff when there was no previous key.
 * The checks below are ordered: each rule only applies if all earlier
 * ones fell through.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1232
c9839a11 1233struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1234{
c9839a11 1235 struct smp_ltk *k;
75d262c2 1236
c9839a11
VCG
1237 list_for_each_entry(k, &hdev->long_term_keys, list) {
1238 if (k->ediv != ediv ||
a8c5fb1a 1239 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1240 continue;
1241
c9839a11 1242 return k;
75d262c2
VCG
1243 }
1244
1245 return NULL;
1246}
1247EXPORT_SYMBOL(hci_find_ltk);
1248
c9839a11 1249struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1250 u8 addr_type)
75d262c2 1251{
c9839a11 1252 struct smp_ltk *k;
75d262c2 1253
c9839a11
VCG
1254 list_for_each_entry(k, &hdev->long_term_keys, list)
1255 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1256 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1257 return k;
1258
1259 return NULL;
1260}
c9839a11 1261EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1262
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL. @new_key non-zero means the key came fresh from the
 * controller, in which case userspace (mgmt) is notified and the key's
 * persistence is evaluated. Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Update the existing entry in place */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1315
/* Store (or update) an SMP long term key / short term key.
 *
 * Only keys flagged HCI_SMP_STK or HCI_SMP_LTK are accepted; anything
 * else is silently ignored (returns 0). With @new_key set, an LTK is
 * additionally reported to userspace via mgmt_new_ltk().
 * Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1352
55ed8ca1
JH
1353int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1354{
1355 struct link_key *key;
1356
1357 key = hci_find_link_key(hdev, bdaddr);
1358 if (!key)
1359 return -ENOENT;
1360
1361 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1362
1363 list_del(&key->list);
1364 kfree(key);
1365
1366 return 0;
1367}
1368
b899efaf
VCG
1369int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1370{
1371 struct smp_ltk *k, *tmp;
1372
1373 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1374 if (bacmp(bdaddr, &k->bdaddr))
1375 continue;
1376
1377 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1378
1379 list_del(&k->list);
1380 kfree(k);
1381 }
1382
1383 return 0;
1384}
1385
/* HCI command timer function.
 * Runs in timer (softirq) context when the controller failed to answer
 * a command in time: reset the command credit so the cmd_work can make
 * progress again and requeue it. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1395
2763eda6 1396struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1397 bdaddr_t *bdaddr)
2763eda6
SJ
1398{
1399 struct oob_data *data;
1400
1401 list_for_each_entry(data, &hdev->remote_oob_data, list)
1402 if (bacmp(bdaddr, &data->bdaddr) == 0)
1403 return data;
1404
1405 return NULL;
1406}
1407
1408int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1409{
1410 struct oob_data *data;
1411
1412 data = hci_find_remote_oob_data(hdev, bdaddr);
1413 if (!data)
1414 return -ENOENT;
1415
1416 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1417
1418 list_del(&data->list);
1419 kfree(data);
1420
1421 return 0;
1422}
1423
1424int hci_remote_oob_data_clear(struct hci_dev *hdev)
1425{
1426 struct oob_data *data, *n;
1427
1428 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1429 list_del(&data->list);
1430 kfree(data);
1431 }
1432
1433 return 0;
1434}
1435
1436int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1437 u8 *randomizer)
2763eda6
SJ
1438{
1439 struct oob_data *data;
1440
1441 data = hci_find_remote_oob_data(hdev, bdaddr);
1442
1443 if (!data) {
1444 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1445 if (!data)
1446 return -ENOMEM;
1447
1448 bacpy(&data->bdaddr, bdaddr);
1449 list_add(&data->list, &hdev->remote_oob_data);
1450 }
1451
1452 memcpy(data->hash, hash, sizeof(data->hash));
1453 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1454
1455 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1456
1457 return 0;
1458}
1459
04124681 1460struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1461{
8035ded4 1462 struct bdaddr_list *b;
b2a66aad 1463
8035ded4 1464 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1465 if (bacmp(bdaddr, &b->bdaddr) == 0)
1466 return b;
b2a66aad
AJ
1467
1468 return NULL;
1469}
1470
1471int hci_blacklist_clear(struct hci_dev *hdev)
1472{
1473 struct list_head *p, *n;
1474
1475 list_for_each_safe(p, n, &hdev->blacklist) {
1476 struct bdaddr_list *b;
1477
1478 b = list_entry(p, struct bdaddr_list, list);
1479
1480 list_del(p);
1481 kfree(b);
1482 }
1483
1484 return 0;
1485}
1486
88c1fe4b 1487int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1488{
1489 struct bdaddr_list *entry;
b2a66aad
AJ
1490
1491 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1492 return -EBADF;
1493
5e762444
AJ
1494 if (hci_blacklist_lookup(hdev, bdaddr))
1495 return -EEXIST;
b2a66aad
AJ
1496
1497 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1498 if (!entry)
1499 return -ENOMEM;
b2a66aad
AJ
1500
1501 bacpy(&entry->bdaddr, bdaddr);
1502
1503 list_add(&entry->list, &hdev->blacklist);
1504
88c1fe4b 1505 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1506}
1507
88c1fe4b 1508int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1509{
1510 struct bdaddr_list *entry;
b2a66aad 1511
1ec918ce 1512 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1513 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1514
1515 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1516 if (!entry)
5e762444 1517 return -ENOENT;
b2a66aad
AJ
1518
1519 list_del(&entry->list);
1520 kfree(entry);
1521
88c1fe4b 1522 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1523}
1524
7ba8b4be
AG
1525static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1526{
1527 struct le_scan_params *param = (struct le_scan_params *) opt;
1528 struct hci_cp_le_set_scan_param cp;
1529
1530 memset(&cp, 0, sizeof(cp));
1531 cp.type = param->type;
1532 cp.interval = cpu_to_le16(param->interval);
1533 cp.window = cpu_to_le16(param->window);
1534
1535 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1536}
1537
1538static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1539{
1540 struct hci_cp_le_set_scan_enable cp;
1541
1542 memset(&cp, 0, sizeof(cp));
1543 cp.enable = 1;
1544
1545 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1546}
1547
/* Start an LE scan synchronously: program parameters, enable scanning,
 * then arm the delayed work that disables it after @timeout ms.
 *
 * Returns -EINPROGRESS if a scan is already running, otherwise the
 * __hci_request result (0 on success). Sleeps; must not be called from
 * atomic context (it is driven from le_scan_work).
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-request HCI timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	/* Both requests must run back to back under the request lock */
	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1581
7dbfac1d
AG
1582int hci_cancel_le_scan(struct hci_dev *hdev)
1583{
1584 BT_DBG("%s", hdev->name);
1585
1586 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1587 return -EALREADY;
1588
1589 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1590 struct hci_cp_le_set_scan_enable cp;
1591
1592 /* Send HCI command to disable LE Scan */
1593 memset(&cp, 0, sizeof(cp));
1594 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1595 }
1596
1597 return 0;
1598}
1599
7ba8b4be
AG
1600static void le_scan_disable_work(struct work_struct *work)
1601{
1602 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1603 le_scan_disable.work);
7ba8b4be
AG
1604 struct hci_cp_le_set_scan_enable cp;
1605
1606 BT_DBG("%s", hdev->name);
1607
1608 memset(&cp, 0, sizeof(cp));
1609
1610 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1611}
1612
28b75a89
AG
1613static void le_scan_work(struct work_struct *work)
1614{
1615 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1616 struct le_scan_params *param = &hdev->le_scan_params;
1617
1618 BT_DBG("%s", hdev->name);
1619
04124681
GP
1620 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1621 param->timeout);
28b75a89
AG
1622}
1623
1624int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 1625 int timeout)
28b75a89
AG
1626{
1627 struct le_scan_params *param = &hdev->le_scan_params;
1628
1629 BT_DBG("%s", hdev->name);
1630
1631 if (work_busy(&hdev->le_scan))
1632 return -EINPROGRESS;
1633
1634 param->type = type;
1635 param->interval = interval;
1636 param->window = window;
1637 param->timeout = timeout;
1638
1639 queue_work(system_long_wq, &hdev->le_scan);
1640
1641 return 0;
1642}
1643
/* Allocate and initialise a new HCI device structure.
 * All lists, work items, queues, the command timer and sysfs state are
 * set up here; the caller registers it with hci_register_dev().
 * Returns NULL on allocation failure. */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Conservative defaults; drivers/controllers override later */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1697
1698/* Free HCI device */
1699void hci_free_dev(struct hci_dev *hdev)
1700{
1701 skb_queue_purge(&hdev->driver_init);
1702
1703 /* will free via device release */
1704 put_device(&hdev->dev);
1705}
1706EXPORT_SYMBOL(hci_free_dev);
1707
/* Register HCI device.
 * Picks the first free hciN index (AMP controllers start at 1 so index 0
 * can double as the AMP controller ID), links the device into the global
 * list, creates its workqueue/sysfs/rfkill state and schedules the
 * deferred power-on. Returns the assigned id or a negative error. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head, *p;
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	write_lock(&hci_dev_list_lock);

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
	head = &hci_dev_list;

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		int nid = list_entry(p, struct hci_dev, list)->id;
		if (nid > id)
			break;
		if (nid == id)
			id++;
		head = p;	/* remember insertion point to keep list sorted */
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	list_add(&hdev->list, head);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1784
/* Unregister HCI device.
 * Tears down in the reverse order of registration: unlink from the
 * global list, close the device, free per-device state (reassembly
 * buffers, keys, blacklist, OOB data), notify mgmt/rfkill/sysfs and
 * drop the registration reference. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if setup completed and we are not mid-init */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1836
1837/* Suspend HCI device */
1838int hci_suspend_dev(struct hci_dev *hdev)
1839{
1840 hci_notify(hdev, HCI_DEV_SUSPEND);
1841 return 0;
1842}
1843EXPORT_SYMBOL(hci_suspend_dev);
1844
1845/* Resume HCI device */
1846int hci_resume_dev(struct hci_dev *hdev)
1847{
1848 hci_notify(hdev, HCI_DEV_RESUME);
1849 return 0;
1850}
1851EXPORT_SYMBOL(hci_resume_dev);
1852
76bca880
MH
1853/* Receive frame from HCI drivers */
1854int hci_recv_frame(struct sk_buff *skb)
1855{
1856 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1857 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 1858 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
1859 kfree_skb(skb);
1860 return -ENXIO;
1861 }
1862
1863 /* Incomming skb */
1864 bt_cb(skb)->incoming = 1;
1865
1866 /* Time stamp */
1867 __net_timestamp(skb);
1868
76bca880 1869 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1870 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1871
76bca880
MH
1872 return 0;
1873}
1874EXPORT_SYMBOL(hci_recv_frame);
1875
/* Reassemble a (possibly fragmented) HCI packet in hdev->reassembly[index].
 *
 * @type:  HCI packet type (ACL, SCO or event); other values -> -EILSEQ
 * @data:  raw bytes from the driver
 * @count: bytes available at @data
 * @index: reassembly slot, must be < NUM_REASSEMBLY
 *
 * Returns the number of unconsumed bytes (>= 0) or a negative error.
 * A completed frame is handed to hci_recv_frame(), which takes
 * ownership of the skb.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* New frame: size the skb for the type's maximum payload
		 * and expect the fixed header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, pull the payload length
		 * out of it and extend the expectation accordingly. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1984
ef222013
MH
1985int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1986{
f39a3c06
SS
1987 int rem = 0;
1988
ef222013
MH
1989 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1990 return -EILSEQ;
1991
da5f6c37 1992 while (count) {
1e429f38 1993 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1994 if (rem < 0)
1995 return rem;
ef222013 1996
f39a3c06
SS
1997 data += (count - rem);
1998 count = rem;
f81c6224 1999 }
ef222013 2000
f39a3c06 2001 return rem;
ef222013
MH
2002}
2003EXPORT_SYMBOL(hci_recv_fragment);
2004
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (each frame prefixed by its packet-type byte)
 * into the single shared stream reassembly slot.
 * Returns the number of bytes still expected for the current frame, or
 * a negative error from hci_reassembly(). */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2039
1da177e4
LT
2040/* ---- Interface to upper protocols ---- */
2041
1da177e4
LT
2042int hci_register_cb(struct hci_cb *cb)
2043{
2044 BT_DBG("%p name %s", cb, cb->name);
2045
f20d09d5 2046 write_lock(&hci_cb_list_lock);
1da177e4 2047 list_add(&cb->list, &hci_cb_list);
f20d09d5 2048 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2049
2050 return 0;
2051}
2052EXPORT_SYMBOL(hci_register_cb);
2053
2054int hci_unregister_cb(struct hci_cb *cb)
2055{
2056 BT_DBG("%p name %s", cb, cb->name);
2057
f20d09d5 2058 write_lock(&hci_cb_list_lock);
1da177e4 2059 list_del(&cb->list);
f20d09d5 2060 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2061
2062 return 0;
2063}
2064EXPORT_SYMBOL(hci_unregister_cb);
2065
2066static int hci_send_frame(struct sk_buff *skb)
2067{
2068 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2069
2070 if (!hdev) {
2071 kfree_skb(skb);
2072 return -ENODEV;
2073 }
2074
0d48d939 2075 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2076
cd82e61c
MH
2077 /* Time stamp */
2078 __net_timestamp(skb);
1da177e4 2079
cd82e61c
MH
2080 /* Send copy to monitor */
2081 hci_send_to_monitor(hdev, skb);
2082
2083 if (atomic_read(&hdev->promisc)) {
2084 /* Send copy to the sockets */
470fe1b5 2085 hci_send_to_sock(hdev, skb);
1da177e4
LT
2086 }
2087
2088 /* Get rid of skb owner, prior to sending to the driver. */
2089 skb_orphan(skb);
2090
2091 return hdev->send(skb);
2092}
2093
/* Send HCI command: build a command skb (header + optional @param
 * payload) and queue it for the command worker.
 * Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command sent during init so its completion can
	 * drive the init state machine. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2129
2130/* Get data from the previously sent command */
a9de9248 2131void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2132{
2133 struct hci_command_hdr *hdr;
2134
2135 if (!hdev->sent_cmd)
2136 return NULL;
2137
2138 hdr = (void *) hdev->sent_cmd->data;
2139
a9de9248 2140 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2141 return NULL;
2142
a9de9248 2143 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2144
2145 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2146}
2147
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to @skb.
 * The skb must have HCI_ACL_HDR_SIZE bytes of headroom. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push below */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2160
/* Queue an ACL skb (and any frag_list fragments) on @queue.
 * The head fragment keeps the caller's @flags; continuation fragments
 * are re-marked ACL_CONT. All fragments are queued atomically so the
 * TX worker never sees a partial frame. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* restrict skb to its linear head; fragments follow via frag_list */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2207
2208void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2209{
2210 struct hci_conn *conn = chan->conn;
2211 struct hci_dev *hdev = conn->hdev;
2212
2213 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2214
2215 skb->dev = (void *) hdev;
73d80deb
LAD
2216
2217 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2218
3eff45ea 2219 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2220}
2221EXPORT_SYMBOL(hci_send_acl);
2222
2223/* Send SCO data */
0d861d8b 2224void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2225{
2226 struct hci_dev *hdev = conn->hdev;
2227 struct hci_sco_hdr hdr;
2228
2229 BT_DBG("%s len %d", hdev->name, skb->len);
2230
aca3192c 2231 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2232 hdr.dlen = skb->len;
2233
badff6d0
ACM
2234 skb_push(skb, HCI_SCO_HDR_SIZE);
2235 skb_reset_transport_header(skb);
9c70220b 2236 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2237
2238 skb->dev = (void *) hdev;
0d48d939 2239 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2240
1da177e4 2241 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2242 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2243}
2244EXPORT_SYMBOL(hci_send_sco);
2245
2246/* ---- HCI TX task (outgoing data) ---- */
2247
/* HCI Connection scheduler */
/* Pick the connection of @type with the fewest in-flight packets and
 * compute its fair share (*quote) of the available controller buffers.
 * Returns NULL with *quote = 0 when nothing is ready to send. */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* all connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares ACL buffers when it has no own pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2308
/* TX timeout handling: forcibly disconnect every connection of the
 * given link type that still has packets outstanding, since the
 * controller has stopped returning buffer credits for it.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2329
/* Pick the HCI channel that should transmit next for the given link
 * type and compute its fair share of controller buffers.
 *
 * Only channels whose head packet carries the highest pending
 * priority are eligible; among those, the channel whose owning
 * connection has the fewest packets in flight wins.  Returns NULL
 * when no channel has data ready; otherwise *quote receives the
 * winner's credit quota (at least 1).
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found: restart the
				 * fairness bookkeeping at this level. */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited: stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the buffer pool matching the winner's link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffers: share the ACL pool. */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split credits evenly between the channels competing at the
	 * winning priority; guarantee at least one. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2408
/* Starvation avoidance: after a scheduling pass, promote the head
 * packet of every channel that sent nothing this round to
 * HCI_PRIO_MAX - 1 so lower-priority traffic cannot be postponed
 * indefinitely.  Channels that did send only get their per-round
 * counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				/* Channel was served this round: just
				 * reset its counter. */
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited: stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2458
b71d385a
AE
2459static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2460{
2461 /* Calculate count of blocks used by this packet */
2462 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2463}
2464
6039aa73 2465static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2466{
1da177e4
LT
2467 if (!test_bit(HCI_RAW, &hdev->flags)) {
2468 /* ACL tx timeout must be longer than maximum
2469 * link supervision timeout (40.9 seconds) */
63d2bc1b 2470 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
a8c5fb1a 2471 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2472 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2473 }
63d2bc1b 2474}
1da177e4 2475
/* Packet-based ACL scheduling: while controller ACL buffers remain,
 * repeatedly pick the best channel and drain it up to its quota,
 * stopping early if the head-of-queue priority drops below the
 * priority the quota was granted for.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Safe: peeked non-NULL above, queue unchanged. */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One buffer credit per packet in this mode. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, promote starved traffic. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2513
/* Block-based ACL scheduling: credits are counted in controller data
 * blocks rather than packets, so one packet may consume several
 * credits (see __get_blocks()).
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): when the packet needs more blocks
			 * than remain, the already-dequeued skb is neither
			 * sent, requeued nor freed here — looks like a
			 * leak/drop; confirm against upstream before
			 * changing. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Charge as many credits as the packet consumed. */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If anything was sent, promote starved traffic. */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2559
6039aa73 2560static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2561{
2562 BT_DBG("%s", hdev->name);
2563
2564 if (!hci_conn_num(hdev, ACL_LINK))
2565 return;
2566
2567 switch (hdev->flow_ctl_mode) {
2568 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2569 hci_sched_acl_pkt(hdev);
2570 break;
2571
2572 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2573 hci_sched_acl_blk(hdev);
2574 break;
2575 }
2576}
2577
1da177e4 2578/* Schedule SCO */
6039aa73 2579static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
2580{
2581 struct hci_conn *conn;
2582 struct sk_buff *skb;
2583 int quote;
2584
2585 BT_DBG("%s", hdev->name);
2586
52087a79
LAD
2587 if (!hci_conn_num(hdev, SCO_LINK))
2588 return;
2589
1da177e4
LT
2590 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2591 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2592 BT_DBG("skb %p len %d", skb, skb->len);
2593 hci_send_frame(skb);
2594
2595 conn->sent++;
2596 if (conn->sent == ~0)
2597 conn->sent = 0;
2598 }
2599 }
2600}
2601
6039aa73 2602static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2603{
2604 struct hci_conn *conn;
2605 struct sk_buff *skb;
2606 int quote;
2607
2608 BT_DBG("%s", hdev->name);
2609
52087a79
LAD
2610 if (!hci_conn_num(hdev, ESCO_LINK))
2611 return;
2612
b6a0dc82
MH
2613 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2614 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2615 BT_DBG("skb %p len %d", skb, skb->len);
2616 hci_send_frame(skb);
2617
2618 conn->sent++;
2619 if (conn->sent == ~0)
2620 conn->sent = 0;
2621 }
2622 }
2623}
2624
/* LE scheduling: like packet-based ACL scheduling, except the
 * controller may either have a dedicated LE buffer pool
 * (le_pkts/le_cnt) or share the ACL pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* No dedicated LE buffers: borrow from the ACL pool. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Safe: peeked non-NULL above, queue unchanged. */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, promote starved traffic. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2675
/* TX work item: run each per-link-type scheduler in turn, then flush
 * any raw (unknown type) packets straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2698
25985edc 2699/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2700
/* ACL data packet */
/* Deliver an incoming ACL frame to L2CAP for its connection, or drop
 * it (with an error) if no connection matches the handle.  Consumes
 * the skb on every path.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field packs the connection handle together
	 * with the packet boundary / broadcast flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* First data on a connection mgmt has not announced yet:
		 * emit the Device Connected event now (once, guarded by
		 * the HCI_CONN_MGMT_CONNECTED bit). */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2744
2745/* SCO data packet */
6039aa73 2746static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2747{
2748 struct hci_sco_hdr *hdr = (void *) skb->data;
2749 struct hci_conn *conn;
2750 __u16 handle;
2751
2752 skb_pull(skb, HCI_SCO_HDR_SIZE);
2753
2754 handle = __le16_to_cpu(hdr->handle);
2755
2756 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2757
2758 hdev->stat.sco_rx++;
2759
2760 hci_dev_lock(hdev);
2761 conn = hci_conn_hash_lookup_handle(hdev, handle);
2762 hci_dev_unlock(hdev);
2763
2764 if (conn) {
1da177e4 2765 /* Send to upper protocol */
686ebf28
UF
2766 sco_recv_scodata(conn, skb);
2767 return;
1da177e4 2768 } else {
8e87d142 2769 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2770 hdev->name, handle);
1da177e4
LT
2771 }
2772
2773 kfree_skb(skb);
2774}
2775
/* RX work item: drain the receive queue, mirroring every frame to the
 * monitor channel and (in promiscuous mode) to raw sockets, then
 * dispatching it to the event/ACL/SCO handlers as appropriate for the
 * device state.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device, the stack does
		 * not process frames itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2830
/* Command work item: when the controller has credit for a command
 * (cmd_cnt > 0), submit the next queued HCI command, keeping a clone
 * in sent_cmd for the matching Command Complete/Status handling and
 * (re)arming the command timeout timer.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous command's saved copy. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No timeout while a reset is in progress. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue the command and retry. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2861
2862int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2863{
2864 /* General inquiry access code (GIAC) */
2865 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2866 struct hci_cp_inquiry cp;
2867
2868 BT_DBG("%s", hdev->name);
2869
2870 if (test_bit(HCI_INQUIRY, &hdev->flags))
2871 return -EINPROGRESS;
2872
4663262c
JH
2873 inquiry_cache_flush(hdev);
2874
2519a1fc
AG
2875 memset(&cp, 0, sizeof(cp));
2876 memcpy(&cp.lap, lap, sizeof(cp.lap));
2877 cp.length = length;
2878
2879 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2880}
023d5049
AG
2881
2882int hci_cancel_inquiry(struct hci_dev *hdev)
2883{
2884 BT_DBG("%s", hdev->name);
2885
2886 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2887 return -EALREADY;
023d5049
AG
2888
2889 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2890}
31f7956c
AG
2891
2892u8 bdaddr_to_le(u8 bdaddr_type)
2893{
2894 switch (bdaddr_type) {
2895 case BDADDR_LE_PUBLIC:
2896 return ADDR_LE_DEV_PUBLIC;
2897
2898 default:
2899 /* Fallback to LE Random address type */
2900 return ADDR_LE_DEV_RANDOM;
2901 }
2902}