Bluetooth: Fix coding style in the subsystem
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4 42#include <linux/interrupt.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
70f23020 48#include <linux/uaccess.h>
1da177e4
LT
49#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
ab81cbf9
JH
54#define AUTO_OFF_TIMEOUT 2000
55
b78752cc 56static void hci_rx_work(struct work_struct *work);
c347b765 57static void hci_cmd_work(struct work_struct *work);
3eff45ea 58static void hci_tx_work(struct work_struct *work);
1da177e4 59
1da177e4
LT
60/* HCI device list */
61LIST_HEAD(hci_dev_list);
62DEFINE_RWLOCK(hci_dev_list_lock);
63
64/* HCI callback list */
65LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock);
67
1da177e4
LT
68/* ---- HCI notifications ---- */
69
/* Forward a device life-cycle event (register/up/down/etc.) to the
 * HCI socket layer so user space listeners get notified. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
/* Called from the event path when a command completes.  During the HCI_INIT
 * phase only the last init command is allowed to complete the request; a
 * mismatching completion is either ignored or triggers a resend (CSR quirk
 * below).  Outside init, wake up the waiter in __hci_request(). */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a clone of the last sent command at the head of
		 * the command queue so it is retried first. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
/* Execute request and wait for completion.
 *
 * Caller must hold the request lock (see hci_request()).  The request
 * callback @req queues HCI commands; completion is signalled via
 * hci_req_complete() / hci_req_cancel().  Returns 0 on success, a
 * negative errno translated from the HCI status, -EINTR on signal or
 * -ETIMEDOUT if @timeout jiffies elapse. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue *before* issuing the request so a
	 * fast completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
169
6039aa73
GP
/* Serialized wrapper around __hci_request(): fails with -ENETDOWN when the
 * device is not up, and takes the per-device request lock so only one
 * synchronous request runs at a time. */
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
186
/* Request worker: issue HCI_Reset.  HCI_RESET flag marks a reset in
 * flight (cleared by the command-complete handler elsewhere). */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
195
/* Queue the initialization command sequence for a BR/EDR controller.
 * Commands are fired without waiting individually; the whole batch is
 * bounded by the __hci_request() timeout during HCI_INIT. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped when the driver quirk says reset-on-close) */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys in the controller */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
247
e61ef499
AE
/* Queue the initialization command sequence for an AMP (alternate MAC/PHY)
 * controller; AMP uses block-based rather than packet-based flow control. */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
261
/* Request worker for device bring-up: first flush any driver-supplied
 * special commands into the command queue, then run the type-specific
 * init sequence (BR/EDR or AMP). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
295
6ed58ec5
VT
/* Request worker for LE-specific init: query the LE ACL buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
303
1da177e4
LT
304static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
305{
306 __u8 scan = opt;
307
308 BT_DBG("%s %x", hdev->name, scan);
309
310 /* Inquiry and Page scans */
a9de9248 311 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
312}
313
314static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
315{
316 __u8 auth = opt;
317
318 BT_DBG("%s %x", hdev->name, auth);
319
320 /* Authentication */
a9de9248 321 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
322}
323
324static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
325{
326 __u8 encrypt = opt;
327
328 BT_DBG("%s %x", hdev->name, encrypt);
329
e4e8e37c 330 /* Encryption */
a9de9248 331 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
332}
333
e4e8e37c
MH
/* Request worker: write the default link policy (little-endian on the
 * wire, hence the cpu_to_le16 conversion of @opt). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
343
8e87d142 344/* Get HCI device by index.
1da177e4
LT
345 * Device is held on return. */
346struct hci_dev *hci_dev_get(int index)
347{
8035ded4 348 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
349
350 BT_DBG("%d", index);
351
352 if (index < 0)
353 return NULL;
354
355 read_lock(&hci_dev_list_lock);
8035ded4 356 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
357 if (d->id == index) {
358 hdev = hci_dev_hold(d);
359 break;
360 }
361 }
362 read_unlock(&hci_dev_list_lock);
363 return hdev;
364}
1da177e4
LT
365
366/* ---- Inquiry support ---- */
ff9ef578 367
30dc78e1
JH
368bool hci_discovery_active(struct hci_dev *hdev)
369{
370 struct discovery_state *discov = &hdev->discovery;
371
6fbe195d 372 switch (discov->state) {
343f935b 373 case DISCOVERY_FINDING:
6fbe195d 374 case DISCOVERY_RESOLVING:
30dc78e1
JH
375 return true;
376
6fbe195d
AG
377 default:
378 return false;
379 }
30dc78e1
JH
380}
381
ff9ef578
JH
/* Transition the discovery state machine and emit the corresponding mgmt
 * "discovering" events.  A transition to STOPPED only reports
 * discovering=0 if discovery actually ran (i.e. not from STARTING). */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
407
1da177e4
LT
/* Free every entry in the inquiry cache and reinitialize the unknown/
 * resolve sub-lists.  Caller is expected to hold the hdev lock (all call
 * sites in this file do hci_dev_lock first). */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	/* Entries on unknown/resolve were freed above (they are also on
	 * the all list), so just reset the list heads. */
	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
421
a8c5fb1a
GP
422struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
423 bdaddr_t *bdaddr)
1da177e4 424{
30883512 425 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
426 struct inquiry_entry *e;
427
428 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
429
561aafbc
JH
430 list_for_each_entry(e, &cache->all, all) {
431 if (!bacmp(&e->data.bdaddr, bdaddr))
432 return e;
433 }
434
435 return NULL;
436}
437
/* Find an entry on the "name unknown" sub-list by Bluetooth address;
 * NULL if the address has no pending name resolution. */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
453
30dc78e1 454struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
455 bdaddr_t *bdaddr,
456 int state)
30dc78e1
JH
457{
458 struct discovery_state *cache = &hdev->discovery;
459 struct inquiry_entry *e;
460
461 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
462
463 list_for_each_entry(e, &cache->resolve, list) {
464 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
465 return e;
466 if (!bacmp(&e->data.bdaddr, bdaddr))
467 return e;
468 }
469
470 return NULL;
471}
472
a3d4e20a 473void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 474 struct inquiry_entry *ie)
a3d4e20a
JH
475{
476 struct discovery_state *cache = &hdev->discovery;
477 struct list_head *pos = &cache->resolve;
478 struct inquiry_entry *p;
479
480 list_del(&ie->list);
481
482 list_for_each_entry(p, &cache->resolve, list) {
483 if (p->name_state != NAME_PENDING &&
a8c5fb1a 484 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
485 break;
486 pos = &p->list;
487 }
488
489 list_add(&ie->list, pos);
490}
491
3175405b 492bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 493 bool name_known, bool *ssp)
1da177e4 494{
30883512 495 struct discovery_state *cache = &hdev->discovery;
70f23020 496 struct inquiry_entry *ie;
1da177e4
LT
497
498 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
499
388fc8fa
JH
500 if (ssp)
501 *ssp = data->ssp_mode;
502
70f23020 503 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 504 if (ie) {
388fc8fa
JH
505 if (ie->data.ssp_mode && ssp)
506 *ssp = true;
507
a3d4e20a 508 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 509 data->rssi != ie->data.rssi) {
a3d4e20a
JH
510 ie->data.rssi = data->rssi;
511 hci_inquiry_cache_update_resolve(hdev, ie);
512 }
513
561aafbc 514 goto update;
a3d4e20a 515 }
561aafbc
JH
516
517 /* Entry not in the cache. Add new one. */
518 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
519 if (!ie)
3175405b 520 return false;
561aafbc
JH
521
522 list_add(&ie->all, &cache->all);
523
524 if (name_known) {
525 ie->name_state = NAME_KNOWN;
526 } else {
527 ie->name_state = NAME_NOT_KNOWN;
528 list_add(&ie->list, &cache->unknown);
529 }
70f23020 530
561aafbc
JH
531update:
532 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 533 ie->name_state != NAME_PENDING) {
561aafbc
JH
534 ie->name_state = NAME_KNOWN;
535 list_del(&ie->list);
1da177e4
LT
536 }
537
70f23020
AE
538 memcpy(&ie->data, data, sizeof(*data));
539 ie->timestamp = jiffies;
1da177e4 540 cache->timestamp = jiffies;
3175405b
JH
541
542 if (ie->name_state == NAME_NOT_KNOWN)
543 return false;
544
545 return true;
1da177e4
LT
546}
547
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries written.  Must not
 * sleep; callers hold the hdev lock. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
575
/* Request worker: start an HCI Inquiry with the LAP/length/num_rsp taken
 * from the user's hci_inquiry_req (@opt points at it).  No-op if an
 * inquiry is already running. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
592
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (when the
 * cache is stale/empty or the caller asked for a flush), then copy the
 * cached results back to user space after the updated request struct. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units per the HCI spec; 2000ms per unit
	 * gives the request a comfortable margin. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
659
660/* ---- HCI ioctl helpers ---- */
661
/* HCIDEVUP ioctl / power-on path: open the driver, run the HCI init
 * sequence (unless the device is raw) and mark the device up.  On init
 * failure the transport is fully torn down again.  Returns 0 or a
 * negative errno (-ENODEV, -ERFKILL, -EALREADY, -EIO, ...). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE: a failing LE init overwrites ret from the BR/EDR
		 * init above; both must succeed for the device to come up. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
753
/* Core teardown path shared by HCIDEVDOWN, rfkill and power-off: cancel
 * pending work, flush queues, optionally reset the controller, and close
 * the driver.  The ordering here is deliberate (works flushed before
 * queues are purged, cmd timer stopped before the skb is freed). */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Only report powered-off to mgmt when this was not an auto-off
	 * transition (auto-off already reported its own state). */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
841
/* HCIDEVDOWN ioctl handler: cancel a pending auto-power-off and bring
 * the device down via hci_dev_do_close(). */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
859
/* HCIDEVRESET ioctl handler: drop all queued traffic and connections,
 * reset the flow-control counters, and (for non-raw devices) send an
 * HCI_Reset to the controller.  Device stays up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
898
899int hci_dev_reset_stat(__u16 dev)
900{
901 struct hci_dev *hdev;
902 int ret = 0;
903
70f23020
AE
904 hdev = hci_dev_get(dev);
905 if (!hdev)
1da177e4
LT
906 return -ENODEV;
907
908 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
909
910 hci_dev_put(hdev);
911
912 return ret;
913}
914
/* Handler for the HCISET* family of ioctls: copy the request from user
 * space and either run the matching synchronous HCI request or update
 * the in-kernel device settings directly. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits, packet count in
		 * the low 16 bits (historical ioctl ABI). */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
989
/* HCIGETDEVLIST ioctl handler: return the list of registered device IDs
 * and flags to user space (up to the caller-supplied dev_num, capped at
 * two pages worth of entries). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl access cancels the auto-off timer: the
		 * device is being managed by a non-mgmt user. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1036
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info snapshot for
 * one device and copy it back to user space. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access cancels auto-off; see hci_get_dev_list(). */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1078
1079/* ---- Interface to HCI drivers ---- */
1080
611b30f7
MH
1081static int hci_rfkill_set_block(void *data, bool blocked)
1082{
1083 struct hci_dev *hdev = data;
1084
1085 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1086
1087 if (!blocked)
1088 return 0;
1089
1090 hci_dev_do_close(hdev);
1091
1092 return 0;
1093}
1094
/* rfkill operations for HCI devices; only blocking is handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1098
ab81cbf9
JH
/* Deferred power-on work: open the device, arm the auto-off timer for
 * devices still in auto-off mode, and announce a newly set-up controller
 * to the management interface. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1115
/* Deferred power-off work (auto-off timer or mgmt request): bring the
 * device down. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1125
16ab91ab
JH
/* Deferred work that ends the discoverable period: drop inquiry scan
 * (keep page scan so the device stays connectable) and clear the
 * discoverable timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1143
2aeb9a1a
JH
1144int hci_uuids_clear(struct hci_dev *hdev)
1145{
1146 struct list_head *p, *n;
1147
1148 list_for_each_safe(p, n, &hdev->uuids) {
1149 struct bt_uuid *uuid;
1150
1151 uuid = list_entry(p, struct bt_uuid, list);
1152
1153 list_del(p);
1154 kfree(uuid);
1155 }
1156
1157 return 0;
1158}
1159
55ed8ca1
JH
1160int hci_link_keys_clear(struct hci_dev *hdev)
1161{
1162 struct list_head *p, *n;
1163
1164 list_for_each_safe(p, n, &hdev->link_keys) {
1165 struct link_key *key;
1166
1167 key = list_entry(p, struct link_key, list);
1168
1169 list_del(p);
1170 kfree(key);
1171 }
1172
1173 return 0;
1174}
1175
/* Remove and free every stored SMP long term key on hdev.
 * Always returns 0.
 */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
55ed8ca1
JH
1188struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1189{
8035ded4 1190 struct link_key *k;
55ed8ca1 1191
8035ded4 1192 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1193 if (bacmp(bdaddr, &k->bdaddr) == 0)
1194 return k;
55ed8ca1
JH
1195
1196 return NULL;
1197}
1198
/* Decide whether a new link key should be stored persistently, i.e.
 * survive beyond the current connection. The decision depends on the
 * key type and on the local/remote bonding requirements recorded on
 * the connection. The checks are ordered; earlier rules win.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case: no connection object to consult */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
/* Look up a long term key by its EDiv/Rand pair (the identifiers the
 * remote sends in an LE encryption request). Returns the key or NULL.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		/* Both the 16-bit ediv and the full 8-byte rand must match */
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
c9839a11 1251struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1252 u8 addr_type)
75d262c2 1253{
c9839a11 1254 struct smp_ltk *k;
75d262c2 1255
c9839a11
VCG
1256 list_for_each_entry(k, &hdev->long_term_keys, list)
1257 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1258 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1259 return k;
1260
1261 return NULL;
1262}
c9839a11 1263EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1264
/* Store (or update) a BR/EDR link key for bdaddr. When new_key is set,
 * notify the management interface and record on the connection whether
 * the key should be flushed (non-persistent) when the link drops.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff = "no previous key type" sentinel */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination event keeps the previous stored type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
/* Store (or update) an SMP key for bdaddr. Only STK and LTK types are
 * stored; other types are ignored (returns 0). When new_key is set and
 * the key is an LTK, the management interface is notified.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address/type if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are short-lived and not reported to userspace */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
55ed8ca1
JH
1355int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1356{
1357 struct link_key *key;
1358
1359 key = hci_find_link_key(hdev, bdaddr);
1360 if (!key)
1361 return -ENOENT;
1362
1363 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1364
1365 list_del(&key->list);
1366 kfree(key);
1367
1368 return 0;
1369}
1370
/* Delete every stored long term key for bdaddr (there may be more than
 * one, e.g. different key types). Always returns 0, even if nothing
 * matched.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function: fires when the controller failed to
 * answer the last command in time. Restore one command credit and kick
 * the command work so the queue does not stall forever.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2763eda6 1398struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1399 bdaddr_t *bdaddr)
2763eda6
SJ
1400{
1401 struct oob_data *data;
1402
1403 list_for_each_entry(data, &hdev->remote_oob_data, list)
1404 if (bacmp(bdaddr, &data->bdaddr) == 0)
1405 return data;
1406
1407 return NULL;
1408}
1409
1410int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1411{
1412 struct oob_data *data;
1413
1414 data = hci_find_remote_oob_data(hdev, bdaddr);
1415 if (!data)
1416 return -ENOENT;
1417
1418 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1419
1420 list_del(&data->list);
1421 kfree(data);
1422
1423 return 0;
1424}
1425
/* Remove and free every stored out-of-band pairing entry on hdev.
 * Always returns 0.
 */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
/* Store (or update) out-of-band pairing data (hash + randomizer) for
 * bdaddr. Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	/* No existing entry for this address: allocate and link one */
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
04124681 1462struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1463{
8035ded4 1464 struct bdaddr_list *b;
b2a66aad 1465
8035ded4 1466 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1467 if (bacmp(bdaddr, &b->bdaddr) == 0)
1468 return b;
b2a66aad
AJ
1469
1470 return NULL;
1471}
1472
1473int hci_blacklist_clear(struct hci_dev *hdev)
1474{
1475 struct list_head *p, *n;
1476
1477 list_for_each_safe(p, n, &hdev->blacklist) {
1478 struct bdaddr_list *b;
1479
1480 b = list_entry(p, struct bdaddr_list, list);
1481
1482 list_del(p);
1483 kfree(b);
1484 }
1485
1486 return 0;
1487}
1488
88c1fe4b 1489int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1490{
1491 struct bdaddr_list *entry;
b2a66aad
AJ
1492
1493 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1494 return -EBADF;
1495
5e762444
AJ
1496 if (hci_blacklist_lookup(hdev, bdaddr))
1497 return -EEXIST;
b2a66aad
AJ
1498
1499 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1500 if (!entry)
1501 return -ENOMEM;
b2a66aad
AJ
1502
1503 bacpy(&entry->bdaddr, bdaddr);
1504
1505 list_add(&entry->list, &hdev->blacklist);
1506
88c1fe4b 1507 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1508}
1509
/* Remove bdaddr from the device blacklist and notify the management
 * interface. Passing BDADDR_ANY clears the whole list instead.
 * Returns -ENOENT if the address is not blacklisted.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY acts as a wildcard: flush the entire blacklist */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
/* __hci_request() callback: send LE Set Scan Parameters using the
 * le_scan_params smuggled through the opaque opt argument.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1539
/* __hci_request() callback: send LE Set Scan Enable with enable = 1
 * (filter-duplicates field left at 0 by the memset).
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1549
/* Synchronously program and enable an LE scan, then arm the delayed
 * work that will disable it after @timeout ms. Returns -EINPROGRESS if
 * a scan is already active, or the error from the HCI requests.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-request HCI timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Scan parameters must be set before scanning is enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1583
/* Cancel a running LE scan before its timeout fires. Returns -EALREADY
 * when no scan is active. The disable command is only sent if we were
 * the ones to cancel the pending disable work (avoids a double send).
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1601
/* Delayed work armed by hci_do_le_scan(): disable the LE scan by
 * sending Set Scan Enable with an all-zero (enable = 0) parameter set.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1614
/* Work item queued by hci_le_scan(): runs the blocking scan setup
 * using the parameters stashed in hdev->le_scan_params.
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1625
/* Asynchronously start an LE scan: record the parameters on hdev and
 * queue le_scan_work() on the system long workqueue (the setup blocks
 * on HCI requests). Returns -EINPROGRESS when a scan work is already
 * pending or running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1645
/* Alloc HCI device: allocate and fully initialize a struct hci_dev
 * (defaults, locks, lists, work items, queues, timer, sysfs hooks).
 * Returns the device or NULL on allocation failure. The device is
 * freed via hci_free_dev() / its device release function.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR defaults until the controller reports its own */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command timeout watchdog; see hci_cmd_timer() */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1699
/* Free HCI device: drop any driver-init frames still queued and release
 * the last device reference; the memory itself is freed by the device
 * release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1709
/* Register HCI device: pick the first free index (keeping the global
 * device list sorted), create the per-device workqueue and sysfs
 * entries, hook up rfkill (best effort) and schedule the initial
 * power-on. Returns the assigned id, or a negative errno; on error the
 * device is unlinked from the global list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head, *p;
	int id, error;

	/* A driver without open/close callbacks is unusable */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	write_lock(&hci_dev_list_lock);

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
	head = &hci_dev_list;

	/* Find first available device id; head ends up at the insertion
	 * point that keeps the list ordered by id */
	list_for_each(p, &hci_dev_list) {
		int nid = list_entry(p, struct hci_dev, list)->id;
		if (nid > id)
			break;
		if (nid == id)
			id++;
		head = p;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	list_add(&hdev->list, head);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure leaves rfkill NULL
	 * but does not fail device registration */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1786
/* Unregister HCI device: unlink it from the global list, close it,
 * free reassembly buffers, tell the management interface (unless the
 * device never finished setup), tear down rfkill/sysfs/workqueue and
 * finally flush all stored keys and data before dropping the reference
 * taken in hci_register_dev().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark first so concurrent paths can see teardown is in progress */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1838
/* Suspend HCI device: only broadcasts the suspend notification to
 * registered listeners; always succeeds.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1846
/* Resume HCI device: only broadcasts the resume notification to
 * registered listeners; always succeeds.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1854
/* Receive frame from HCI drivers: validate that the device exists and
 * is up (or initializing), stamp the skb and hand it to the rx work.
 * Consumes the skb in all cases; returns -ENXIO when dropped.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incomming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1877
/* Incrementally reassemble one HCI packet of the given type from a raw
 * byte stream. Partial state is kept in hdev->reassembly[index]; the
 * packet header is parsed as soon as it is complete to learn the
 * payload length (stored in scb->expect). A finished packet is passed
 * to hci_recv_frame(). Returns the number of input bytes NOT consumed,
 * or a negative errno (-EILSEQ for bad type/index, -ENOMEM).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	/* No partial packet pending: allocate a buffer sized for the
	 * largest possible packet of this type and expect its header. */
	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current header/payload needs */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is fully received, switch to expecting
		 * the payload length it declares; drop the packet if it
		 * would not fit in the allocated buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1986
/* Feed a driver-supplied fragment of a typed HCI packet into the
 * per-type reassembly slot (index = type - 1). Loops because one
 * fragment may complete a packet and start the next. Returns the
 * number of unconsumed bytes (0 when everything was used) or a
 * negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past what hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2006
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (packet type byte embedded in-band, e.g. UART
 * H4 transport) into the dedicated stream reassembly slot. The first
 * byte of each packet selects the type; continuation bytes reuse the
 * type recorded on the partial skb. Returns unconsumed byte count or a
 * negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2041
1da177e4
LT
2042/* ---- Interface to upper protocols ---- */
2043
1da177e4
LT
2044int hci_register_cb(struct hci_cb *cb)
2045{
2046 BT_DBG("%p name %s", cb, cb->name);
2047
f20d09d5 2048 write_lock(&hci_cb_list_lock);
1da177e4 2049 list_add(&cb->list, &hci_cb_list);
f20d09d5 2050 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2051
2052 return 0;
2053}
2054EXPORT_SYMBOL(hci_register_cb);
2055
/* Unregister a previously registered upper-protocol callback structure.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2067
/* Hand a fully built HCI frame to the driver: timestamp it, mirror a
 * copy to the monitor channel and (in promiscuous mode) to raw
 * sockets, then call the driver's send callback. Consumes the skb;
 * returns the driver's result or -ENODEV.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2095
/* Send HCI command: build a command packet (header + optional
 * parameters) and queue it on the command queue; the command work
 * dequeues it when a controller credit is available. Returns 0 or
 * -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During init, remember the last command sent for the init
	 * state machine */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2130}
1da177e4
LT
2131
2132/* Get data from the previously sent command */
a9de9248 2133void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2134{
2135 struct hci_command_hdr *hdr;
2136
2137 if (!hdev->sent_cmd)
2138 return NULL;
2139
2140 hdr = (void *) hdev->sent_cmd->data;
2141
a9de9248 2142 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2143 return NULL;
2144
a9de9248 2145 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2146
2147 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2148}
2149
/* Prepend an ACL data header (handle + packet-boundary/broadcast flags
 * packed into one field, plus the little-endian payload length) to skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before pushing the header */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2162
/* Queue an ACL skb (possibly carrying a frag_list of continuation
 * fragments) on the given channel queue. The first fragment keeps the
 * caller's flags; every continuation fragment is re-flagged ACL_CONT.
 * All fragments of one frame are enqueued atomically under the queue
 * lock so the scheduler never interleaves frames.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear data; fragments are
	 * queued individually below */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
/* Send ACL data on a channel: queue the skb on the channel's data
 * queue and kick the TX work, which performs the actual scheduling.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2224
/* Send SCO data: prepend the SCO header (handle + length), queue the
 * skb on the connection's data queue and kick the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2247
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets (fair scheduling). *quote is set to this
 * connection's share of the available controller buffers (at least 1),
 * or 0 when no connection is ready. Traversal is RCU-protected.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available buffer credits depend on the link type;
		 * LE falls back to ACL credits when no LE buffers exist */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2310
6039aa73 2311static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2312{
2313 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2314 struct hci_conn *c;
1da177e4 2315
bae1f5d9 2316 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2317
bf4c6325
GP
2318 rcu_read_lock();
2319
1da177e4 2320 /* Kill stalled connections */
bf4c6325 2321 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2322 if (c->type == type && c->sent) {
2323 BT_ERR("%s killing stalled connection %s",
a8c5fb1a 2324 hdev->name, batostr(&c->dst));
1da177e4
LT
2325 hci_acl_disconn(c, 0x13);
2326 }
2327 }
bf4c6325
GP
2328
2329 rcu_read_unlock();
1da177e4
LT
2330}
2331
6039aa73
GP
2332static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2333 int *quote)
1da177e4 2334{
73d80deb
LAD
2335 struct hci_conn_hash *h = &hdev->conn_hash;
2336 struct hci_chan *chan = NULL;
abc5de8f 2337 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2338 struct hci_conn *conn;
73d80deb
LAD
2339 int cnt, q, conn_num = 0;
2340
2341 BT_DBG("%s", hdev->name);
2342
bf4c6325
GP
2343 rcu_read_lock();
2344
2345 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2346 struct hci_chan *tmp;
2347
2348 if (conn->type != type)
2349 continue;
2350
2351 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2352 continue;
2353
2354 conn_num++;
2355
8192edef 2356 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2357 struct sk_buff *skb;
2358
2359 if (skb_queue_empty(&tmp->data_q))
2360 continue;
2361
2362 skb = skb_peek(&tmp->data_q);
2363 if (skb->priority < cur_prio)
2364 continue;
2365
2366 if (skb->priority > cur_prio) {
2367 num = 0;
2368 min = ~0;
2369 cur_prio = skb->priority;
2370 }
2371
2372 num++;
2373
2374 if (conn->sent < min) {
2375 min = conn->sent;
2376 chan = tmp;
2377 }
2378 }
2379
2380 if (hci_conn_num(hdev, type) == conn_num)
2381 break;
2382 }
2383
bf4c6325
GP
2384 rcu_read_unlock();
2385
73d80deb
LAD
2386 if (!chan)
2387 return NULL;
2388
2389 switch (chan->conn->type) {
2390 case ACL_LINK:
2391 cnt = hdev->acl_cnt;
2392 break;
2393 case SCO_LINK:
2394 case ESCO_LINK:
2395 cnt = hdev->sco_cnt;
2396 break;
2397 case LE_LINK:
2398 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2399 break;
2400 default:
2401 cnt = 0;
2402 BT_ERR("Unknown link type");
2403 }
2404
2405 q = cnt / num;
2406 *quote = q ? q : 1;
2407 BT_DBG("chan %p quote %d", chan, *quote);
2408 return chan;
2409}
2410
02b20f0b
LAD
2411static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2412{
2413 struct hci_conn_hash *h = &hdev->conn_hash;
2414 struct hci_conn *conn;
2415 int num = 0;
2416
2417 BT_DBG("%s", hdev->name);
2418
bf4c6325
GP
2419 rcu_read_lock();
2420
2421 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2422 struct hci_chan *chan;
2423
2424 if (conn->type != type)
2425 continue;
2426
2427 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2428 continue;
2429
2430 num++;
2431
8192edef 2432 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2433 struct sk_buff *skb;
2434
2435 if (chan->sent) {
2436 chan->sent = 0;
2437 continue;
2438 }
2439
2440 if (skb_queue_empty(&chan->data_q))
2441 continue;
2442
2443 skb = skb_peek(&chan->data_q);
2444 if (skb->priority >= HCI_PRIO_MAX - 1)
2445 continue;
2446
2447 skb->priority = HCI_PRIO_MAX - 1;
2448
2449 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 2450 skb->priority);
02b20f0b
LAD
2451 }
2452
2453 if (hci_conn_num(hdev, type) == num)
2454 break;
2455 }
bf4c6325
GP
2456
2457 rcu_read_unlock();
2458
02b20f0b
LAD
2459}
2460
b71d385a
AE
2461static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2462{
2463 /* Calculate count of blocks used by this packet */
2464 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2465}
2466
6039aa73 2467static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2468{
1da177e4
LT
2469 if (!test_bit(HCI_RAW, &hdev->flags)) {
2470 /* ACL tx timeout must be longer than maximum
2471 * link supervision timeout (40.9 seconds) */
63d2bc1b 2472 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
a8c5fb1a 2473 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2474 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2475 }
63d2bc1b 2476}
1da177e4 2477
6039aa73 2478static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2479{
2480 unsigned int cnt = hdev->acl_cnt;
2481 struct hci_chan *chan;
2482 struct sk_buff *skb;
2483 int quote;
2484
2485 __check_timeout(hdev, cnt);
04837f64 2486
73d80deb 2487 while (hdev->acl_cnt &&
a8c5fb1a 2488 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2489 u32 priority = (skb_peek(&chan->data_q))->priority;
2490 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2491 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2492 skb->len, skb->priority);
73d80deb 2493
ec1cce24
LAD
2494 /* Stop if priority has changed */
2495 if (skb->priority < priority)
2496 break;
2497
2498 skb = skb_dequeue(&chan->data_q);
2499
73d80deb 2500 hci_conn_enter_active_mode(chan->conn,
04124681 2501 bt_cb(skb)->force_active);
04837f64 2502
1da177e4
LT
2503 hci_send_frame(skb);
2504 hdev->acl_last_tx = jiffies;
2505
2506 hdev->acl_cnt--;
73d80deb
LAD
2507 chan->sent++;
2508 chan->conn->sent++;
1da177e4
LT
2509 }
2510 }
02b20f0b
LAD
2511
2512 if (cnt != hdev->acl_cnt)
2513 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2514}
2515
6039aa73 2516static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2517{
63d2bc1b 2518 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2519 struct hci_chan *chan;
2520 struct sk_buff *skb;
2521 int quote;
b71d385a 2522
63d2bc1b 2523 __check_timeout(hdev, cnt);
b71d385a
AE
2524
2525 while (hdev->block_cnt > 0 &&
a8c5fb1a 2526 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
b71d385a
AE
2527 u32 priority = (skb_peek(&chan->data_q))->priority;
2528 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2529 int blocks;
2530
2531 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2532 skb->len, skb->priority);
b71d385a
AE
2533
2534 /* Stop if priority has changed */
2535 if (skb->priority < priority)
2536 break;
2537
2538 skb = skb_dequeue(&chan->data_q);
2539
2540 blocks = __get_blocks(hdev, skb);
2541 if (blocks > hdev->block_cnt)
2542 return;
2543
2544 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2545 bt_cb(skb)->force_active);
b71d385a
AE
2546
2547 hci_send_frame(skb);
2548 hdev->acl_last_tx = jiffies;
2549
2550 hdev->block_cnt -= blocks;
2551 quote -= blocks;
2552
2553 chan->sent += blocks;
2554 chan->conn->sent += blocks;
2555 }
2556 }
2557
2558 if (cnt != hdev->block_cnt)
2559 hci_prio_recalculate(hdev, ACL_LINK);
2560}
2561
6039aa73 2562static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2563{
2564 BT_DBG("%s", hdev->name);
2565
2566 if (!hci_conn_num(hdev, ACL_LINK))
2567 return;
2568
2569 switch (hdev->flow_ctl_mode) {
2570 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2571 hci_sched_acl_pkt(hdev);
2572 break;
2573
2574 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2575 hci_sched_acl_blk(hdev);
2576 break;
2577 }
2578}
2579
1da177e4 2580/* Schedule SCO */
6039aa73 2581static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
2582{
2583 struct hci_conn *conn;
2584 struct sk_buff *skb;
2585 int quote;
2586
2587 BT_DBG("%s", hdev->name);
2588
52087a79
LAD
2589 if (!hci_conn_num(hdev, SCO_LINK))
2590 return;
2591
1da177e4
LT
2592 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2593 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2594 BT_DBG("skb %p len %d", skb, skb->len);
2595 hci_send_frame(skb);
2596
2597 conn->sent++;
2598 if (conn->sent == ~0)
2599 conn->sent = 0;
2600 }
2601 }
2602}
2603
6039aa73 2604static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2605{
2606 struct hci_conn *conn;
2607 struct sk_buff *skb;
2608 int quote;
2609
2610 BT_DBG("%s", hdev->name);
2611
52087a79
LAD
2612 if (!hci_conn_num(hdev, ESCO_LINK))
2613 return;
2614
8fc9ced3
GP
2615 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2616 &quote))) {
b6a0dc82
MH
2617 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2618 BT_DBG("skb %p len %d", skb, skb->len);
2619 hci_send_frame(skb);
2620
2621 conn->sent++;
2622 if (conn->sent == ~0)
2623 conn->sent = 0;
2624 }
2625 }
2626}
2627
6039aa73 2628static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 2629{
73d80deb 2630 struct hci_chan *chan;
6ed58ec5 2631 struct sk_buff *skb;
02b20f0b 2632 int quote, cnt, tmp;
6ed58ec5
VT
2633
2634 BT_DBG("%s", hdev->name);
2635
52087a79
LAD
2636 if (!hci_conn_num(hdev, LE_LINK))
2637 return;
2638
6ed58ec5
VT
2639 if (!test_bit(HCI_RAW, &hdev->flags)) {
2640 /* LE tx timeout must be longer than maximum
2641 * link supervision timeout (40.9 seconds) */
bae1f5d9 2642 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 2643 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 2644 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
2645 }
2646
2647 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 2648 tmp = cnt;
73d80deb 2649 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
2650 u32 priority = (skb_peek(&chan->data_q))->priority;
2651 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2652 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2653 skb->len, skb->priority);
6ed58ec5 2654
ec1cce24
LAD
2655 /* Stop if priority has changed */
2656 if (skb->priority < priority)
2657 break;
2658
2659 skb = skb_dequeue(&chan->data_q);
2660
6ed58ec5
VT
2661 hci_send_frame(skb);
2662 hdev->le_last_tx = jiffies;
2663
2664 cnt--;
73d80deb
LAD
2665 chan->sent++;
2666 chan->conn->sent++;
6ed58ec5
VT
2667 }
2668 }
73d80deb 2669
6ed58ec5
VT
2670 if (hdev->le_pkts)
2671 hdev->le_cnt = cnt;
2672 else
2673 hdev->acl_cnt = cnt;
02b20f0b
LAD
2674
2675 if (cnt != tmp)
2676 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
2677}
2678
3eff45ea 2679static void hci_tx_work(struct work_struct *work)
1da177e4 2680{
3eff45ea 2681 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2682 struct sk_buff *skb;
2683
6ed58ec5 2684 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 2685 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2686
2687 /* Schedule queues and send stuff to HCI driver */
2688
2689 hci_sched_acl(hdev);
2690
2691 hci_sched_sco(hdev);
2692
b6a0dc82
MH
2693 hci_sched_esco(hdev);
2694
6ed58ec5
VT
2695 hci_sched_le(hdev);
2696
1da177e4
LT
2697 /* Send next queued raw (unknown type) packet */
2698 while ((skb = skb_dequeue(&hdev->raw_q)))
2699 hci_send_frame(skb);
1da177e4
LT
2700}
2701
25985edc 2702/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2703
2704/* ACL data packet */
6039aa73 2705static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2706{
2707 struct hci_acl_hdr *hdr = (void *) skb->data;
2708 struct hci_conn *conn;
2709 __u16 handle, flags;
2710
2711 skb_pull(skb, HCI_ACL_HDR_SIZE);
2712
2713 handle = __le16_to_cpu(hdr->handle);
2714 flags = hci_flags(handle);
2715 handle = hci_handle(handle);
2716
a8c5fb1a
GP
2717 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2718 handle, flags);
1da177e4
LT
2719
2720 hdev->stat.acl_rx++;
2721
2722 hci_dev_lock(hdev);
2723 conn = hci_conn_hash_lookup_handle(hdev, handle);
2724 hci_dev_unlock(hdev);
8e87d142 2725
1da177e4 2726 if (conn) {
65983fc7 2727 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2728
671267bf
JH
2729 hci_dev_lock(hdev);
2730 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2731 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2732 mgmt_device_connected(hdev, &conn->dst, conn->type,
2733 conn->dst_type, 0, NULL, 0,
2734 conn->dev_class);
2735 hci_dev_unlock(hdev);
2736
1da177e4 2737 /* Send to upper protocol */
686ebf28
UF
2738 l2cap_recv_acldata(conn, skb, flags);
2739 return;
1da177e4 2740 } else {
8e87d142 2741 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 2742 hdev->name, handle);
1da177e4
LT
2743 }
2744
2745 kfree_skb(skb);
2746}
2747
2748/* SCO data packet */
6039aa73 2749static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2750{
2751 struct hci_sco_hdr *hdr = (void *) skb->data;
2752 struct hci_conn *conn;
2753 __u16 handle;
2754
2755 skb_pull(skb, HCI_SCO_HDR_SIZE);
2756
2757 handle = __le16_to_cpu(hdr->handle);
2758
2759 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2760
2761 hdev->stat.sco_rx++;
2762
2763 hci_dev_lock(hdev);
2764 conn = hci_conn_hash_lookup_handle(hdev, handle);
2765 hci_dev_unlock(hdev);
2766
2767 if (conn) {
1da177e4 2768 /* Send to upper protocol */
686ebf28
UF
2769 sco_recv_scodata(conn, skb);
2770 return;
1da177e4 2771 } else {
8e87d142 2772 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2773 hdev->name, handle);
1da177e4
LT
2774 }
2775
2776 kfree_skb(skb);
2777}
2778
b78752cc 2779static void hci_rx_work(struct work_struct *work)
1da177e4 2780{
b78752cc 2781 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2782 struct sk_buff *skb;
2783
2784 BT_DBG("%s", hdev->name);
2785
1da177e4 2786 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
2787 /* Send copy to monitor */
2788 hci_send_to_monitor(hdev, skb);
2789
1da177e4
LT
2790 if (atomic_read(&hdev->promisc)) {
2791 /* Send copy to the sockets */
470fe1b5 2792 hci_send_to_sock(hdev, skb);
1da177e4
LT
2793 }
2794
2795 if (test_bit(HCI_RAW, &hdev->flags)) {
2796 kfree_skb(skb);
2797 continue;
2798 }
2799
2800 if (test_bit(HCI_INIT, &hdev->flags)) {
2801 /* Don't process data packets in this states. */
0d48d939 2802 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2803 case HCI_ACLDATA_PKT:
2804 case HCI_SCODATA_PKT:
2805 kfree_skb(skb);
2806 continue;
3ff50b79 2807 }
1da177e4
LT
2808 }
2809
2810 /* Process frame */
0d48d939 2811 switch (bt_cb(skb)->pkt_type) {
1da177e4 2812 case HCI_EVENT_PKT:
b78752cc 2813 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2814 hci_event_packet(hdev, skb);
2815 break;
2816
2817 case HCI_ACLDATA_PKT:
2818 BT_DBG("%s ACL data packet", hdev->name);
2819 hci_acldata_packet(hdev, skb);
2820 break;
2821
2822 case HCI_SCODATA_PKT:
2823 BT_DBG("%s SCO data packet", hdev->name);
2824 hci_scodata_packet(hdev, skb);
2825 break;
2826
2827 default:
2828 kfree_skb(skb);
2829 break;
2830 }
2831 }
1da177e4
LT
2832}
2833
c347b765 2834static void hci_cmd_work(struct work_struct *work)
1da177e4 2835{
c347b765 2836 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2837 struct sk_buff *skb;
2838
2839 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2840
1da177e4 2841 /* Send queued commands */
5a08ecce
AE
2842 if (atomic_read(&hdev->cmd_cnt)) {
2843 skb = skb_dequeue(&hdev->cmd_q);
2844 if (!skb)
2845 return;
2846
7585b97a 2847 kfree_skb(hdev->sent_cmd);
1da177e4 2848
70f23020
AE
2849 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2850 if (hdev->sent_cmd) {
1da177e4
LT
2851 atomic_dec(&hdev->cmd_cnt);
2852 hci_send_frame(skb);
7bdb8a5c
SJ
2853 if (test_bit(HCI_RESET, &hdev->flags))
2854 del_timer(&hdev->cmd_timer);
2855 else
2856 mod_timer(&hdev->cmd_timer,
6bd32326 2857 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1da177e4
LT
2858 } else {
2859 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2860 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2861 }
2862 }
2863}
2519a1fc
AG
2864
2865int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2866{
2867 /* General inquiry access code (GIAC) */
2868 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2869 struct hci_cp_inquiry cp;
2870
2871 BT_DBG("%s", hdev->name);
2872
2873 if (test_bit(HCI_INQUIRY, &hdev->flags))
2874 return -EINPROGRESS;
2875
4663262c
JH
2876 inquiry_cache_flush(hdev);
2877
2519a1fc
AG
2878 memset(&cp, 0, sizeof(cp));
2879 memcpy(&cp.lap, lap, sizeof(cp.lap));
2880 cp.length = length;
2881
2882 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2883}
023d5049
AG
2884
2885int hci_cancel_inquiry(struct hci_dev *hdev)
2886{
2887 BT_DBG("%s", hdev->name);
2888
2889 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2890 return -EALREADY;
023d5049
AG
2891
2892 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2893}
31f7956c
AG
2894
2895u8 bdaddr_to_le(u8 bdaddr_type)
2896{
2897 switch (bdaddr_type) {
2898 case BDADDR_LE_PUBLIC:
2899 return ADDR_LE_DEV_PUBLIC;
2900
2901 default:
2902 /* Fallback to LE Random address type */
2903 return ADDR_LE_DEV_RANDOM;
2904 }
2905}