Bluetooth: Minor code refactoring
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
56#define AUTO_OFF_TIMEOUT 2000
57
8b281b9c 58bool enable_hs;
7784d78f 59
b78752cc 60static void hci_rx_work(struct work_struct *work);
c347b765 61static void hci_cmd_work(struct work_struct *work);
3eff45ea 62static void hci_tx_work(struct work_struct *work);
1da177e4 63
1da177e4
LT
64/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
1da177e4 72/* HCI notifiers list */
e041c683 73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
74
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
e041c683 79 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
80}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
e041c683 84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
85}
86
6516455d 87static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 88{
e041c683 89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
90}
91
92/* ---- HCI requests ---- */
93
23bb5763 94void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 95{
23bb5763
JH
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
a5040efa
JH
98 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 102 return;
1da177e4
LT
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
8e87d142 123static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 124 unsigned long opt, __u32 timeout)
1da177e4
LT
125{
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
e175072f 146 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
3ff50b79 156 }
1da177e4 157
a5040efa 158 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 166 unsigned long opt, __u32 timeout)
1da177e4
LT
167{
168 int ret;
169
7c6a329e
MH
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
1da177e4
LT
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
181static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182{
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
f630cf0d 186 set_bit(HCI_RESET, &hdev->flags);
a9de9248 187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
188}
189
e61ef499 190static void bredr_init(struct hci_dev *hdev)
1da177e4 191{
b0916ea0 192 struct hci_cp_delete_stored_link_key cp;
1ebb9252 193 __le16 param;
89f2783d 194 __u8 flt_type;
1da177e4 195
2455a3ea
AE
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
1da177e4
LT
198 /* Mandatory initialization */
199
200 /* Reset */
f630cf0d 201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 204 }
1da177e4
LT
205
206 /* Read Local Supported Features */
a9de9248 207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 208
1143e5a6 209 /* Read Local Version */
a9de9248 210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 211
1da177e4 212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 214
1da177e4 215 /* Read BD Address */
a9de9248
MH
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
223
224 /* Read Voice Setting */
a9de9248 225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
89f2783d 230 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 232
1da177e4 233 /* Connection accept timeout ~20 secs */
aca3192c 234 param = cpu_to_le16(0x7d00);
a9de9248 235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
240}
241
e61ef499
AE
242static void amp_init(struct hci_dev *hdev)
243{
2455a3ea
AE
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
e61ef499
AE
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251}
252
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254{
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285}
286
6ed58ec5
VT
287static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288{
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293}
294
1da177e4
LT
295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
a9de9248 302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
a9de9248 312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
e4e8e37c 321 /* Encryption */
a9de9248 322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
323}
324
e4e8e37c
MH
325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
a418b893 329 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
8e87d142 335/* Get HCI device by index.
1da177e4
LT
336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
8035ded4 339 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
8035ded4 347 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
1da177e4
LT
356
357/* ---- Inquiry support ---- */
ff9ef578 358
30dc78e1
JH
359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
6fbe195d
AG
363 switch (discov->state) {
364 case DISCOVERY_INQUIRY:
365 case DISCOVERY_LE_SCAN:
366 case DISCOVERY_RESOLVING:
30dc78e1
JH
367 return true;
368
6fbe195d
AG
369 default:
370 return false;
371 }
30dc78e1
JH
372}
373
ff9ef578
JH
374void hci_discovery_set_state(struct hci_dev *hdev, int state)
375{
376 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
377
378 if (hdev->discovery.state == state)
379 return;
380
381 switch (state) {
382 case DISCOVERY_STOPPED:
383 mgmt_discovering(hdev, 0);
384 break;
385 case DISCOVERY_STARTING:
386 break;
30dc78e1 387 case DISCOVERY_INQUIRY:
c599008f 388 case DISCOVERY_LE_SCAN:
ff9ef578
JH
389 mgmt_discovering(hdev, 1);
390 break;
30dc78e1
JH
391 case DISCOVERY_RESOLVING:
392 break;
ff9ef578
JH
393 case DISCOVERY_STOPPING:
394 break;
395 }
396
397 hdev->discovery.state = state;
398}
399
1da177e4
LT
400static void inquiry_cache_flush(struct hci_dev *hdev)
401{
30883512 402 struct discovery_state *cache = &hdev->discovery;
b57c1a56 403 struct inquiry_entry *p, *n;
1da177e4 404
561aafbc
JH
405 list_for_each_entry_safe(p, n, &cache->all, all) {
406 list_del(&p->all);
b57c1a56 407 kfree(p);
1da177e4 408 }
561aafbc
JH
409
410 INIT_LIST_HEAD(&cache->unknown);
411 INIT_LIST_HEAD(&cache->resolve);
ff9ef578 412 cache->state = DISCOVERY_STOPPED;
1da177e4
LT
413}
414
415struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
416{
30883512 417 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
561aafbc
JH
422 list_for_each_entry(e, &cache->all, all) {
423 if (!bacmp(&e->data.bdaddr, bdaddr))
424 return e;
425 }
426
427 return NULL;
428}
429
430struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
431 bdaddr_t *bdaddr)
432{
30883512 433 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
434 struct inquiry_entry *e;
435
436 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
437
438 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 439 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
440 return e;
441 }
442
443 return NULL;
1da177e4
LT
444}
445
30dc78e1
JH
446struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
447 bdaddr_t *bdaddr,
448 int state)
449{
450 struct discovery_state *cache = &hdev->discovery;
451 struct inquiry_entry *e;
452
453 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
454
455 list_for_each_entry(e, &cache->resolve, list) {
456 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
457 return e;
458 if (!bacmp(&e->data.bdaddr, bdaddr))
459 return e;
460 }
461
462 return NULL;
463}
464
a3d4e20a
JH
465void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
466 struct inquiry_entry *ie)
467{
468 struct discovery_state *cache = &hdev->discovery;
469 struct list_head *pos = &cache->resolve;
470 struct inquiry_entry *p;
471
472 list_del(&ie->list);
473
474 list_for_each_entry(p, &cache->resolve, list) {
475 if (p->name_state != NAME_PENDING &&
476 abs(p->data.rssi) >= abs(ie->data.rssi))
477 break;
478 pos = &p->list;
479 }
480
481 list_add(&ie->list, pos);
482}
483
3175405b 484bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
561aafbc 485 bool name_known)
1da177e4 486{
30883512 487 struct discovery_state *cache = &hdev->discovery;
70f23020 488 struct inquiry_entry *ie;
1da177e4
LT
489
490 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
491
70f23020 492 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a
JH
493 if (ie) {
494 if (ie->name_state == NAME_NEEDED &&
495 data->rssi != ie->data.rssi) {
496 ie->data.rssi = data->rssi;
497 hci_inquiry_cache_update_resolve(hdev, ie);
498 }
499
561aafbc 500 goto update;
a3d4e20a 501 }
561aafbc
JH
502
503 /* Entry not in the cache. Add new one. */
504 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
505 if (!ie)
3175405b 506 return false;
561aafbc
JH
507
508 list_add(&ie->all, &cache->all);
509
510 if (name_known) {
511 ie->name_state = NAME_KNOWN;
512 } else {
513 ie->name_state = NAME_NOT_KNOWN;
514 list_add(&ie->list, &cache->unknown);
515 }
70f23020 516
561aafbc
JH
517update:
518 if (name_known && ie->name_state != NAME_KNOWN &&
519 ie->name_state != NAME_PENDING) {
520 ie->name_state = NAME_KNOWN;
521 list_del(&ie->list);
1da177e4
LT
522 }
523
70f23020
AE
524 memcpy(&ie->data, data, sizeof(*data));
525 ie->timestamp = jiffies;
1da177e4 526 cache->timestamp = jiffies;
3175405b
JH
527
528 if (ie->name_state == NAME_NOT_KNOWN)
529 return false;
530
531 return true;
1da177e4
LT
532}
533
534static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
535{
30883512 536 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
537 struct inquiry_info *info = (struct inquiry_info *) buf;
538 struct inquiry_entry *e;
539 int copied = 0;
540
561aafbc 541 list_for_each_entry(e, &cache->all, all) {
1da177e4 542 struct inquiry_data *data = &e->data;
b57c1a56
JH
543
544 if (copied >= num)
545 break;
546
1da177e4
LT
547 bacpy(&info->bdaddr, &data->bdaddr);
548 info->pscan_rep_mode = data->pscan_rep_mode;
549 info->pscan_period_mode = data->pscan_period_mode;
550 info->pscan_mode = data->pscan_mode;
551 memcpy(info->dev_class, data->dev_class, 3);
552 info->clock_offset = data->clock_offset;
b57c1a56 553
1da177e4 554 info++;
b57c1a56 555 copied++;
1da177e4
LT
556 }
557
558 BT_DBG("cache %p, copied %d", cache, copied);
559 return copied;
560}
561
562static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
563{
564 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
565 struct hci_cp_inquiry cp;
566
567 BT_DBG("%s", hdev->name);
568
569 if (test_bit(HCI_INQUIRY, &hdev->flags))
570 return;
571
572 /* Start Inquiry */
573 memcpy(&cp.lap, &ir->lap, 3);
574 cp.length = ir->length;
575 cp.num_rsp = ir->num_rsp;
a9de9248 576 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
577}
578
579int hci_inquiry(void __user *arg)
580{
581 __u8 __user *ptr = arg;
582 struct hci_inquiry_req ir;
583 struct hci_dev *hdev;
584 int err = 0, do_inquiry = 0, max_rsp;
585 long timeo;
586 __u8 *buf;
587
588 if (copy_from_user(&ir, ptr, sizeof(ir)))
589 return -EFAULT;
590
5a08ecce
AE
591 hdev = hci_dev_get(ir.dev_id);
592 if (!hdev)
1da177e4
LT
593 return -ENODEV;
594
09fd0de5 595 hci_dev_lock(hdev);
8e87d142 596 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
597 inquiry_cache_empty(hdev) ||
598 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
599 inquiry_cache_flush(hdev);
600 do_inquiry = 1;
601 }
09fd0de5 602 hci_dev_unlock(hdev);
1da177e4 603
04837f64 604 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
605
606 if (do_inquiry) {
607 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
608 if (err < 0)
609 goto done;
610 }
1da177e4
LT
611
612 /* for unlimited number of responses we will use buffer with 255 entries */
613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
614
615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
616 * copy it to the user space.
617 */
01df8c31 618 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 619 if (!buf) {
1da177e4
LT
620 err = -ENOMEM;
621 goto done;
622 }
623
09fd0de5 624 hci_dev_lock(hdev);
1da177e4 625 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 626 hci_dev_unlock(hdev);
1da177e4
LT
627
628 BT_DBG("num_rsp %d", ir.num_rsp);
629
630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
631 ptr += sizeof(ir);
632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
633 ir.num_rsp))
634 err = -EFAULT;
8e87d142 635 } else
1da177e4
LT
636 err = -EFAULT;
637
638 kfree(buf);
639
640done:
641 hci_dev_put(hdev);
642 return err;
643}
644
645/* ---- HCI ioctl helpers ---- */
646
647int hci_dev_open(__u16 dev)
648{
649 struct hci_dev *hdev;
650 int ret = 0;
651
5a08ecce
AE
652 hdev = hci_dev_get(dev);
653 if (!hdev)
1da177e4
LT
654 return -ENODEV;
655
656 BT_DBG("%s %p", hdev->name, hdev);
657
658 hci_req_lock(hdev);
659
611b30f7
MH
660 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
661 ret = -ERFKILL;
662 goto done;
663 }
664
1da177e4
LT
665 if (test_bit(HCI_UP, &hdev->flags)) {
666 ret = -EALREADY;
667 goto done;
668 }
669
670 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
671 set_bit(HCI_RAW, &hdev->flags);
672
07e3b94a
AE
673 /* Treat all non BR/EDR controllers as raw devices if
674 enable_hs is not set */
675 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
676 set_bit(HCI_RAW, &hdev->flags);
677
1da177e4
LT
678 if (hdev->open(hdev)) {
679 ret = -EIO;
680 goto done;
681 }
682
683 if (!test_bit(HCI_RAW, &hdev->flags)) {
684 atomic_set(&hdev->cmd_cnt, 1);
685 set_bit(HCI_INIT, &hdev->flags);
a5040efa 686 hdev->init_last_cmd = 0;
1da177e4 687
04837f64
MH
688 ret = __hci_request(hdev, hci_init_req, 0,
689 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 690
eead27da 691 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
692 ret = __hci_request(hdev, hci_le_init_req, 0,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT));
694
1da177e4
LT
695 clear_bit(HCI_INIT, &hdev->flags);
696 }
697
698 if (!ret) {
699 hci_dev_hold(hdev);
700 set_bit(HCI_UP, &hdev->flags);
701 hci_notify(hdev, HCI_DEV_UP);
a8b2d5c2 702 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 703 hci_dev_lock(hdev);
744cf19e 704 mgmt_powered(hdev, 1);
09fd0de5 705 hci_dev_unlock(hdev);
56e5cb86 706 }
8e87d142 707 } else {
1da177e4 708 /* Init failed, cleanup */
3eff45ea 709 flush_work(&hdev->tx_work);
c347b765 710 flush_work(&hdev->cmd_work);
b78752cc 711 flush_work(&hdev->rx_work);
1da177e4
LT
712
713 skb_queue_purge(&hdev->cmd_q);
714 skb_queue_purge(&hdev->rx_q);
715
716 if (hdev->flush)
717 hdev->flush(hdev);
718
719 if (hdev->sent_cmd) {
720 kfree_skb(hdev->sent_cmd);
721 hdev->sent_cmd = NULL;
722 }
723
724 hdev->close(hdev);
725 hdev->flags = 0;
726 }
727
728done:
729 hci_req_unlock(hdev);
730 hci_dev_put(hdev);
731 return ret;
732}
733
734static int hci_dev_do_close(struct hci_dev *hdev)
735{
736 BT_DBG("%s %p", hdev->name, hdev);
737
738 hci_req_cancel(hdev, ENODEV);
739 hci_req_lock(hdev);
740
741 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 742 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
743 hci_req_unlock(hdev);
744 return 0;
745 }
746
3eff45ea
GP
747 /* Flush RX and TX works */
748 flush_work(&hdev->tx_work);
b78752cc 749 flush_work(&hdev->rx_work);
1da177e4 750
16ab91ab 751 if (hdev->discov_timeout > 0) {
e0f9309f 752 cancel_delayed_work(&hdev->discov_off);
16ab91ab
JH
753 hdev->discov_timeout = 0;
754 }
755
a8b2d5c2 756 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 757 cancel_delayed_work(&hdev->power_off);
3243553f 758
a8b2d5c2 759 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
760 cancel_delayed_work(&hdev->service_cache);
761
09fd0de5 762 hci_dev_lock(hdev);
1da177e4
LT
763 inquiry_cache_flush(hdev);
764 hci_conn_hash_flush(hdev);
09fd0de5 765 hci_dev_unlock(hdev);
1da177e4
LT
766
767 hci_notify(hdev, HCI_DEV_DOWN);
768
769 if (hdev->flush)
770 hdev->flush(hdev);
771
772 /* Reset device */
773 skb_queue_purge(&hdev->cmd_q);
774 atomic_set(&hdev->cmd_cnt, 1);
8af59467
JH
775 if (!test_bit(HCI_RAW, &hdev->flags) &&
776 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
1da177e4 777 set_bit(HCI_INIT, &hdev->flags);
04837f64 778 __hci_request(hdev, hci_reset_req, 0,
cad44c2b 779 msecs_to_jiffies(250));
1da177e4
LT
780 clear_bit(HCI_INIT, &hdev->flags);
781 }
782
c347b765
GP
783 /* flush cmd work */
784 flush_work(&hdev->cmd_work);
1da177e4
LT
785
786 /* Drop queues */
787 skb_queue_purge(&hdev->rx_q);
788 skb_queue_purge(&hdev->cmd_q);
789 skb_queue_purge(&hdev->raw_q);
790
791 /* Drop last sent command */
792 if (hdev->sent_cmd) {
b79f44c1 793 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
794 kfree_skb(hdev->sent_cmd);
795 hdev->sent_cmd = NULL;
796 }
797
798 /* After this point our queues are empty
799 * and no tasks are scheduled. */
800 hdev->close(hdev);
801
09fd0de5 802 hci_dev_lock(hdev);
744cf19e 803 mgmt_powered(hdev, 0);
09fd0de5 804 hci_dev_unlock(hdev);
5add6af8 805
1da177e4
LT
806 /* Clear flags */
807 hdev->flags = 0;
808
809 hci_req_unlock(hdev);
810
811 hci_dev_put(hdev);
812 return 0;
813}
814
815int hci_dev_close(__u16 dev)
816{
817 struct hci_dev *hdev;
818 int err;
819
70f23020
AE
820 hdev = hci_dev_get(dev);
821 if (!hdev)
1da177e4
LT
822 return -ENODEV;
823 err = hci_dev_do_close(hdev);
824 hci_dev_put(hdev);
825 return err;
826}
827
828int hci_dev_reset(__u16 dev)
829{
830 struct hci_dev *hdev;
831 int ret = 0;
832
70f23020
AE
833 hdev = hci_dev_get(dev);
834 if (!hdev)
1da177e4
LT
835 return -ENODEV;
836
837 hci_req_lock(hdev);
1da177e4
LT
838
839 if (!test_bit(HCI_UP, &hdev->flags))
840 goto done;
841
842 /* Drop queues */
843 skb_queue_purge(&hdev->rx_q);
844 skb_queue_purge(&hdev->cmd_q);
845
09fd0de5 846 hci_dev_lock(hdev);
1da177e4
LT
847 inquiry_cache_flush(hdev);
848 hci_conn_hash_flush(hdev);
09fd0de5 849 hci_dev_unlock(hdev);
1da177e4
LT
850
851 if (hdev->flush)
852 hdev->flush(hdev);
853
8e87d142 854 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 855 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
856
857 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
858 ret = __hci_request(hdev, hci_reset_req, 0,
859 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
860
861done:
1da177e4
LT
862 hci_req_unlock(hdev);
863 hci_dev_put(hdev);
864 return ret;
865}
866
867int hci_dev_reset_stat(__u16 dev)
868{
869 struct hci_dev *hdev;
870 int ret = 0;
871
70f23020
AE
872 hdev = hci_dev_get(dev);
873 if (!hdev)
1da177e4
LT
874 return -ENODEV;
875
876 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
877
878 hci_dev_put(hdev);
879
880 return ret;
881}
882
883int hci_dev_cmd(unsigned int cmd, void __user *arg)
884{
885 struct hci_dev *hdev;
886 struct hci_dev_req dr;
887 int err = 0;
888
889 if (copy_from_user(&dr, arg, sizeof(dr)))
890 return -EFAULT;
891
70f23020
AE
892 hdev = hci_dev_get(dr.dev_id);
893 if (!hdev)
1da177e4
LT
894 return -ENODEV;
895
896 switch (cmd) {
897 case HCISETAUTH:
04837f64
MH
898 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
899 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
900 break;
901
902 case HCISETENCRYPT:
903 if (!lmp_encrypt_capable(hdev)) {
904 err = -EOPNOTSUPP;
905 break;
906 }
907
908 if (!test_bit(HCI_AUTH, &hdev->flags)) {
909 /* Auth must be enabled first */
04837f64
MH
910 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
911 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
912 if (err)
913 break;
914 }
915
04837f64
MH
916 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
917 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
918 break;
919
920 case HCISETSCAN:
04837f64
MH
921 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
922 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
923 break;
924
1da177e4 925 case HCISETLINKPOL:
e4e8e37c
MH
926 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
927 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
928 break;
929
930 case HCISETLINKMODE:
e4e8e37c
MH
931 hdev->link_mode = ((__u16) dr.dev_opt) &
932 (HCI_LM_MASTER | HCI_LM_ACCEPT);
933 break;
934
935 case HCISETPTYPE:
936 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
937 break;
938
939 case HCISETACLMTU:
e4e8e37c
MH
940 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
941 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
942 break;
943
944 case HCISETSCOMTU:
e4e8e37c
MH
945 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
946 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
947 break;
948
949 default:
950 err = -EINVAL;
951 break;
952 }
e4e8e37c 953
1da177e4
LT
954 hci_dev_put(hdev);
955 return err;
956}
957
958int hci_get_dev_list(void __user *arg)
959{
8035ded4 960 struct hci_dev *hdev;
1da177e4
LT
961 struct hci_dev_list_req *dl;
962 struct hci_dev_req *dr;
1da177e4
LT
963 int n = 0, size, err;
964 __u16 dev_num;
965
966 if (get_user(dev_num, (__u16 __user *) arg))
967 return -EFAULT;
968
969 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
970 return -EINVAL;
971
972 size = sizeof(*dl) + dev_num * sizeof(*dr);
973
70f23020
AE
974 dl = kzalloc(size, GFP_KERNEL);
975 if (!dl)
1da177e4
LT
976 return -ENOMEM;
977
978 dr = dl->dev_req;
979
f20d09d5 980 read_lock(&hci_dev_list_lock);
8035ded4 981 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 982 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 983 cancel_delayed_work(&hdev->power_off);
c542a06c 984
a8b2d5c2
JH
985 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
986 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 987
1da177e4
LT
988 (dr + n)->dev_id = hdev->id;
989 (dr + n)->dev_opt = hdev->flags;
c542a06c 990
1da177e4
LT
991 if (++n >= dev_num)
992 break;
993 }
f20d09d5 994 read_unlock(&hci_dev_list_lock);
1da177e4
LT
995
996 dl->dev_num = n;
997 size = sizeof(*dl) + n * sizeof(*dr);
998
999 err = copy_to_user(arg, dl, size);
1000 kfree(dl);
1001
1002 return err ? -EFAULT : 0;
1003}
1004
1005int hci_get_dev_info(void __user *arg)
1006{
1007 struct hci_dev *hdev;
1008 struct hci_dev_info di;
1009 int err = 0;
1010
1011 if (copy_from_user(&di, arg, sizeof(di)))
1012 return -EFAULT;
1013
70f23020
AE
1014 hdev = hci_dev_get(di.dev_id);
1015 if (!hdev)
1da177e4
LT
1016 return -ENODEV;
1017
a8b2d5c2 1018 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1019 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1020
a8b2d5c2
JH
1021 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1022 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1023
1da177e4
LT
1024 strcpy(di.name, hdev->name);
1025 di.bdaddr = hdev->bdaddr;
943da25d 1026 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1027 di.flags = hdev->flags;
1028 di.pkt_type = hdev->pkt_type;
1029 di.acl_mtu = hdev->acl_mtu;
1030 di.acl_pkts = hdev->acl_pkts;
1031 di.sco_mtu = hdev->sco_mtu;
1032 di.sco_pkts = hdev->sco_pkts;
1033 di.link_policy = hdev->link_policy;
1034 di.link_mode = hdev->link_mode;
1035
1036 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1037 memcpy(&di.features, &hdev->features, sizeof(di.features));
1038
1039 if (copy_to_user(arg, &di, sizeof(di)))
1040 err = -EFAULT;
1041
1042 hci_dev_put(hdev);
1043
1044 return err;
1045}
1046
1047/* ---- Interface to HCI drivers ---- */
1048
611b30f7
MH
1049static int hci_rfkill_set_block(void *data, bool blocked)
1050{
1051 struct hci_dev *hdev = data;
1052
1053 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1054
1055 if (!blocked)
1056 return 0;
1057
1058 hci_dev_do_close(hdev);
1059
1060 return 0;
1061}
1062
1063static const struct rfkill_ops hci_rfkill_ops = {
1064 .set_block = hci_rfkill_set_block,
1065};
1066
1da177e4
LT
1067/* Alloc HCI device */
1068struct hci_dev *hci_alloc_dev(void)
1069{
1070 struct hci_dev *hdev;
1071
25ea6db0 1072 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1073 if (!hdev)
1074 return NULL;
1075
0ac7e700 1076 hci_init_sysfs(hdev);
1da177e4
LT
1077 skb_queue_head_init(&hdev->driver_init);
1078
1079 return hdev;
1080}
1081EXPORT_SYMBOL(hci_alloc_dev);
1082
1083/* Free HCI device */
1084void hci_free_dev(struct hci_dev *hdev)
1085{
1086 skb_queue_purge(&hdev->driver_init);
1087
a91f2e39
MH
1088 /* will free via device release */
1089 put_device(&hdev->dev);
1da177e4
LT
1090}
1091EXPORT_SYMBOL(hci_free_dev);
1092
ab81cbf9
JH
1093static void hci_power_on(struct work_struct *work)
1094{
1095 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1096
1097 BT_DBG("%s", hdev->name);
1098
1099 if (hci_dev_open(hdev->id) < 0)
1100 return;
1101
a8b2d5c2 1102 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
80b7ab33 1103 schedule_delayed_work(&hdev->power_off,
3243553f 1104 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9 1105
a8b2d5c2 1106 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1107 mgmt_index_added(hdev);
ab81cbf9
JH
1108}
1109
1110static void hci_power_off(struct work_struct *work)
1111{
3243553f
JH
1112 struct hci_dev *hdev = container_of(work, struct hci_dev,
1113 power_off.work);
ab81cbf9
JH
1114
1115 BT_DBG("%s", hdev->name);
1116
a8b2d5c2 1117 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ab81cbf9 1118
3243553f 1119 hci_dev_close(hdev->id);
ab81cbf9
JH
1120}
1121
16ab91ab
JH
1122static void hci_discov_off(struct work_struct *work)
1123{
1124 struct hci_dev *hdev;
1125 u8 scan = SCAN_PAGE;
1126
1127 hdev = container_of(work, struct hci_dev, discov_off.work);
1128
1129 BT_DBG("%s", hdev->name);
1130
09fd0de5 1131 hci_dev_lock(hdev);
16ab91ab
JH
1132
1133 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1134
1135 hdev->discov_timeout = 0;
1136
09fd0de5 1137 hci_dev_unlock(hdev);
16ab91ab
JH
1138}
1139
2aeb9a1a
JH
1140int hci_uuids_clear(struct hci_dev *hdev)
1141{
1142 struct list_head *p, *n;
1143
1144 list_for_each_safe(p, n, &hdev->uuids) {
1145 struct bt_uuid *uuid;
1146
1147 uuid = list_entry(p, struct bt_uuid, list);
1148
1149 list_del(p);
1150 kfree(uuid);
1151 }
1152
1153 return 0;
1154}
1155
55ed8ca1
JH
1156int hci_link_keys_clear(struct hci_dev *hdev)
1157{
1158 struct list_head *p, *n;
1159
1160 list_for_each_safe(p, n, &hdev->link_keys) {
1161 struct link_key *key;
1162
1163 key = list_entry(p, struct link_key, list);
1164
1165 list_del(p);
1166 kfree(key);
1167 }
1168
1169 return 0;
1170}
1171
b899efaf
VCG
1172int hci_smp_ltks_clear(struct hci_dev *hdev)
1173{
1174 struct smp_ltk *k, *tmp;
1175
1176 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1177 list_del(&k->list);
1178 kfree(k);
1179 }
1180
1181 return 0;
1182}
1183
55ed8ca1
JH
1184struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1185{
8035ded4 1186 struct link_key *k;
55ed8ca1 1187
8035ded4 1188 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1189 if (bacmp(bdaddr, &k->bdaddr) == 0)
1190 return k;
55ed8ca1
JH
1191
1192 return NULL;
1193}
1194
d25e28ab
JH
/* Decide whether a freshly created link key should be stored
 * persistently. Returns 1 when the key should be kept, 0 when it
 * must be discarded once the connection is gone. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1230
c9839a11 1231struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1232{
c9839a11 1233 struct smp_ltk *k;
75d262c2 1234
c9839a11
VCG
1235 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1238 continue;
1239
c9839a11 1240 return k;
75d262c2
VCG
1241 }
1242
1243 return NULL;
1244}
1245EXPORT_SYMBOL(hci_find_ltk);
1246
c9839a11
VCG
1247struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1248 u8 addr_type)
75d262c2 1249{
c9839a11 1250 struct smp_ltk *k;
75d262c2 1251
c9839a11
VCG
1252 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1255 return k;
1256
1257 return NULL;
1258}
c9839a11 1259EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1260
d25e28ab
JH
/* Store (or update) a BR/EDR link key for bdaddr.
 *
 * @new_key: non-zero when the key was just created by pairing; in
 *           that case userspace is notified and keys judged
 *           non-persistent are dropped again afterwards.
 *
 * Returns 0 on success or -ENOMEM when a new entry cannot be
 * allocated. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address when present */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
				(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Userspace was told about the key; drop it again if it must
	 * not outlive this connection */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1315
c9839a11
VCG
/* Store (or update) an SMP key for bdaddr/addr_type.
 *
 * Only STK and LTK type keys are stored; other types are silently
 * ignored. When @new_key is set and the key is an LTK, userspace is
 * notified via mgmt. Returns 0 on success or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address when present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are never announced to userspace */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1352
55ed8ca1
JH
1353int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1354{
1355 struct link_key *key;
1356
1357 key = hci_find_link_key(hdev, bdaddr);
1358 if (!key)
1359 return -ENOENT;
1360
1361 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1362
1363 list_del(&key->list);
1364 kfree(key);
1365
1366 return 0;
1367}
1368
b899efaf
VCG
1369int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1370{
1371 struct smp_ltk *k, *tmp;
1372
1373 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1374 if (bacmp(bdaddr, &k->bdaddr))
1375 continue;
1376
1377 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1378
1379 list_del(&k->list);
1380 kfree(k);
1381 }
1382
1383 return 0;
1384}
1385
6bd32326
VT
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never answered; allow one new command so the
	 * command queue is not stalled forever */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1395
2763eda6
SJ
1396struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1397 bdaddr_t *bdaddr)
1398{
1399 struct oob_data *data;
1400
1401 list_for_each_entry(data, &hdev->remote_oob_data, list)
1402 if (bacmp(bdaddr, &data->bdaddr) == 0)
1403 return data;
1404
1405 return NULL;
1406}
1407
1408int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1409{
1410 struct oob_data *data;
1411
1412 data = hci_find_remote_oob_data(hdev, bdaddr);
1413 if (!data)
1414 return -ENOENT;
1415
1416 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1417
1418 list_del(&data->list);
1419 kfree(data);
1420
1421 return 0;
1422}
1423
1424int hci_remote_oob_data_clear(struct hci_dev *hdev)
1425{
1426 struct oob_data *data, *n;
1427
1428 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1429 list_del(&data->list);
1430 kfree(data);
1431 }
1432
1433 return 0;
1434}
1435
/* Cache the remote OOB hash/randomizer pair for bdaddr, reusing an
 * existing cache entry when one exists.
 * Returns 0 on success or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* New values always replace any previously cached ones */
	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1459
b2a66aad
AJ
1460struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1461 bdaddr_t *bdaddr)
1462{
8035ded4 1463 struct bdaddr_list *b;
b2a66aad 1464
8035ded4 1465 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1466 if (bacmp(bdaddr, &b->bdaddr) == 0)
1467 return b;
b2a66aad
AJ
1468
1469 return NULL;
1470}
1471
1472int hci_blacklist_clear(struct hci_dev *hdev)
1473{
1474 struct list_head *p, *n;
1475
1476 list_for_each_safe(p, n, &hdev->blacklist) {
1477 struct bdaddr_list *b;
1478
1479 b = list_entry(p, struct bdaddr_list, list);
1480
1481 list_del(p);
1482 kfree(b);
1483 }
1484
1485 return 0;
1486}
1487
1488int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1489{
1490 struct bdaddr_list *entry;
b2a66aad
AJ
1491
1492 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1493 return -EBADF;
1494
5e762444
AJ
1495 if (hci_blacklist_lookup(hdev, bdaddr))
1496 return -EEXIST;
b2a66aad
AJ
1497
1498 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1499 if (!entry)
1500 return -ENOMEM;
b2a66aad
AJ
1501
1502 bacpy(&entry->bdaddr, bdaddr);
1503
1504 list_add(&entry->list, &hdev->blacklist);
1505
744cf19e 1506 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1507}
1508
/* Remove bdaddr from the blacklist and notify userspace.
 * Passing BDADDR_ANY clears the entire blacklist instead.
 * Returns -ENOENT when the address was not blacklisted. */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY is the wildcard: wipe the whole list */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1525
/* Delayed work that expires the LE advertising cache. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1537
76c8686f
AG
1538int hci_adv_entries_clear(struct hci_dev *hdev)
1539{
1540 struct adv_entry *entry, *tmp;
1541
1542 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1543 list_del(&entry->list);
1544 kfree(entry);
1545 }
1546
1547 BT_DBG("%s adv cache cleared", hdev->name);
1548
1549 return 0;
1550}
1551
1552struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1553{
1554 struct adv_entry *entry;
1555
1556 list_for_each_entry(entry, &hdev->adv_entries, list)
1557 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1558 return entry;
1559
1560 return NULL;
1561}
1562
1563static inline int is_connectable_adv(u8 evt_type)
1564{
1565 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1566 return 1;
1567
1568 return 0;
1569}
1570
/* Cache the sender of a connectable LE advertising report.
 * Returns -EINVAL for non-connectable reports, 0 when the address is
 * already cached or was added, -ENOMEM on allocation failure. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1598
1da177e4
LT
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must at least supply open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert so the list stays sorted by id */
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller parameters */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded workqueue for rx/tx/cmd work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional; registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device on asynchronously; HCI_SETUP is cleared by
	 * hci_power_on once setup completed */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1726
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if the index was ever announced */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Make sure the adv cache expiry work is not running anymore */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all remaining per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1779
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; device state is left untouched */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1787
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; device state is left untouched */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1795
76bca880
MH
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Only accept frames while the device is up or initializing */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer processing to the rx work running on the device's
	 * workqueue */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1818
/* Incrementally reassemble one HCI packet of the given type from a
 * driver-supplied byte stream.
 *
 * State is kept in hdev->reassembly[index]; scb->expect tracks how
 * many bytes are still missing. A complete packet is handed to
 * hci_recv_frame(). Returns the number of unconsumed input bytes,
 * or a negative errno. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the maximum possible
		 * size for this packet type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it; bail out if the claimed length cannot fit */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1927
ef222013
MH
1928int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1929{
f39a3c06
SS
1930 int rem = 0;
1931
ef222013
MH
1932 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1933 return -EILSEQ;
1934
da5f6c37 1935 while (count) {
1e429f38 1936 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1937 if (rem < 0)
1938 return rem;
ef222013 1939
f39a3c06
SS
1940 data += (count - rem);
1941 count = rem;
f81c6224 1942 }
ef222013 1943
f39a3c06 1944 return rem;
ef222013
MH
1945}
1946EXPORT_SYMBOL(hci_recv_fragment);
1947
99811510
SS
1948#define STREAM_REASSEMBLY 0
1949
/* Reassemble HCI packets from a raw byte stream (e.g. a UART-style
 * driver) where each packet is prefixed by its type byte.
 * Returns leftover byte count or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continuation: type was recorded when the
			 * packet started */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1982
1da177e4
LT
1983/* ---- Interface to upper protocols ---- */
1984
1da177e4
LT
/* Register an upper-layer protocol callback set (L2CAP, SCO, ...).
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1996
/* Unregister a previously registered protocol callback set.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2008
/* Hand one outgoing frame to the driver, mirroring it to any
 * promiscuous HCI sockets first. Consumes the skb. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Copy to monitoring sockets before the driver sees it */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2032
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Build the command header followed by the parameter block */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during init so the init
	 * sequence can be resumed after its completion event */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Actual transmission happens from the cmd work item */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2068
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Only hand back parameters belonging to the requested opcode */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	/* Pointer to the parameter block right after the header */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2086
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the header */

	/* Prepend the ACL header in front of the payload */
	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Handle and packet boundary/broadcast flags share one field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2099
/* Queue an ACL packet (and any fragments chained on its frag_list)
 * onto the given channel queue, tagging continuation fragments with
 * ACL_CONT. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2140
/* Send an ACL data packet on the given channel: add the ACL header,
 * queue it, and kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	/* Transmission is done asynchronously by the tx work item */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2157
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Prepend the SCO header in front of the payload */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Transmission is done asynchronously by the tx work item */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2180
2181/* ---- HCI TX task (outgoing data) ---- */
2182
/* HCI Connection scheduler */
/* Pick the connection of the given link type with data queued and
 * the fewest packets in flight, and compute its fair-share quote
 * (controller buffer count divided by number of contenders). */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffer count for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2242
/* Link TX timeout: the controller stopped acknowledging packets, so
 * disconnect every connection of the given type with data in flight. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
						hdev->name, batostr(&c->dst));
			/* 0x13: remote user terminated connection */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2263
/* Channel scheduler: among all channels of connections of the given
 * link type, pick one holding data of the highest queued priority,
 * breaking ties by fewest packets in flight on its connection.
 * Returns the chosen channel and its fair-share quote, or NULL. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Priority of the channel = priority of its
			 * head-of-queue packet */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * accounting at this new level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Available controller buffer count for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2342
/* Rebalance channel priorities for @type after a transmission round.
 *
 * Channels that transmitted in the last round get their per-round 'sent'
 * counter cleared.  Channels that were starved (sent nothing but still
 * have data queued) have their head skb promoted to HCI_PRIO_MAX - 1 so
 * they are favored by the next hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset the
			 * counter, no promotion needed. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion ceiling */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
							       skb->priority);
		}

		/* Visited every connection of this type: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2392
b71d385a
AE
2393static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2394{
2395 /* Calculate count of blocks used by this packet */
2396 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2397}
2398
63d2bc1b 2399static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2400{
1da177e4
LT
2401 if (!test_bit(HCI_RAW, &hdev->flags)) {
2402 /* ACL tx timeout must be longer than maximum
2403 * link supervision timeout (40.9 seconds) */
63d2bc1b 2404 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
cc48dc0a 2405 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2406 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2407 }
63d2bc1b 2408}
1da177e4 2409
/* Schedule ACL transmission under packet-based flow control.
 *
 * Repeatedly asks hci_chan_sent() for the best channel and sends up to
 * its quote of packets, as long as ACL credits remain.  Per-channel and
 * per-connection 'sent' counters track outstanding packets.  If anything
 * was transmitted, starved channels are re-prioritized afterwards.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect and tear down stalled links before scheduling */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed; a fresh selection
			 * round must re-rank the channels. */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit consumed, one more packet in flight */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: promote channels that were starved */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2447
b71d385a
AE
2448static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2449{
63d2bc1b 2450 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2451 struct hci_chan *chan;
2452 struct sk_buff *skb;
2453 int quote;
b71d385a 2454
63d2bc1b 2455 __check_timeout(hdev, cnt);
b71d385a
AE
2456
2457 while (hdev->block_cnt > 0 &&
2458 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2459 u32 priority = (skb_peek(&chan->data_q))->priority;
2460 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2461 int blocks;
2462
2463 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2464 skb->len, skb->priority);
2465
2466 /* Stop if priority has changed */
2467 if (skb->priority < priority)
2468 break;
2469
2470 skb = skb_dequeue(&chan->data_q);
2471
2472 blocks = __get_blocks(hdev, skb);
2473 if (blocks > hdev->block_cnt)
2474 return;
2475
2476 hci_conn_enter_active_mode(chan->conn,
2477 bt_cb(skb)->force_active);
2478
2479 hci_send_frame(skb);
2480 hdev->acl_last_tx = jiffies;
2481
2482 hdev->block_cnt -= blocks;
2483 quote -= blocks;
2484
2485 chan->sent += blocks;
2486 chan->conn->sent += blocks;
2487 }
2488 }
2489
2490 if (cnt != hdev->block_cnt)
2491 hci_prio_recalculate(hdev, ACL_LINK);
2492}
2493
2494static inline void hci_sched_acl(struct hci_dev *hdev)
2495{
2496 BT_DBG("%s", hdev->name);
2497
2498 if (!hci_conn_num(hdev, ACL_LINK))
2499 return;
2500
2501 switch (hdev->flow_ctl_mode) {
2502 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2503 hci_sched_acl_pkt(hdev);
2504 break;
2505
2506 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2507 hci_sched_acl_blk(hdev);
2508 break;
2509 }
2510}
2511
1da177e4
LT
2512/* Schedule SCO */
2513static inline void hci_sched_sco(struct hci_dev *hdev)
2514{
2515 struct hci_conn *conn;
2516 struct sk_buff *skb;
2517 int quote;
2518
2519 BT_DBG("%s", hdev->name);
2520
52087a79
LAD
2521 if (!hci_conn_num(hdev, SCO_LINK))
2522 return;
2523
1da177e4
LT
2524 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2525 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2526 BT_DBG("skb %p len %d", skb, skb->len);
2527 hci_send_frame(skb);
2528
2529 conn->sent++;
2530 if (conn->sent == ~0)
2531 conn->sent = 0;
2532 }
2533 }
2534}
2535
b6a0dc82
MH
2536static inline void hci_sched_esco(struct hci_dev *hdev)
2537{
2538 struct hci_conn *conn;
2539 struct sk_buff *skb;
2540 int quote;
2541
2542 BT_DBG("%s", hdev->name);
2543
52087a79
LAD
2544 if (!hci_conn_num(hdev, ESCO_LINK))
2545 return;
2546
b6a0dc82
MH
2547 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2548 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2549 BT_DBG("skb %p len %d", skb, skb->len);
2550 hci_send_frame(skb);
2551
2552 conn->sent++;
2553 if (conn->sent == ~0)
2554 conn->sent = 0;
2555 }
2556 }
2557}
2558
/* Schedule LE data transmission.
 *
 * When the controller reports a dedicated LE buffer pool (le_pkts != 0)
 * LE credits are consumed; otherwise LE traffic shares the ACL pool.
 * The remaining credit is written back to whichever pool was used.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): duplicates __check_timeout() with a
		 * hard-coded 45 s instead of HCI_ACL_TX_TIMEOUT —
		 * candidate for consolidation if the values match. */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Choose the credit pool: dedicated LE, or shared ACL */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed; a fresh selection
			 * round must re-rank the channels. */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			/* One credit consumed, one more packet in flight */
			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused credit to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: promote channels that were starved */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2609
3eff45ea 2610static void hci_tx_work(struct work_struct *work)
1da177e4 2611{
3eff45ea 2612 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2613 struct sk_buff *skb;
2614
6ed58ec5
VT
2615 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2616 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2617
2618 /* Schedule queues and send stuff to HCI driver */
2619
2620 hci_sched_acl(hdev);
2621
2622 hci_sched_sco(hdev);
2623
b6a0dc82
MH
2624 hci_sched_esco(hdev);
2625
6ed58ec5
VT
2626 hci_sched_le(hdev);
2627
1da177e4
LT
2628 /* Send next queued raw (unknown type) packet */
2629 while ((skb = skb_dequeue(&hdev->raw_q)))
2630 hci_send_frame(skb);
1da177e4
LT
2631}
2632
25985edc 2633/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2634
2635/* ACL data packet */
2636static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2637{
2638 struct hci_acl_hdr *hdr = (void *) skb->data;
2639 struct hci_conn *conn;
2640 __u16 handle, flags;
2641
2642 skb_pull(skb, HCI_ACL_HDR_SIZE);
2643
2644 handle = __le16_to_cpu(hdr->handle);
2645 flags = hci_flags(handle);
2646 handle = hci_handle(handle);
2647
2648 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2649
2650 hdev->stat.acl_rx++;
2651
2652 hci_dev_lock(hdev);
2653 conn = hci_conn_hash_lookup_handle(hdev, handle);
2654 hci_dev_unlock(hdev);
8e87d142 2655
1da177e4 2656 if (conn) {
65983fc7 2657 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2658
1da177e4 2659 /* Send to upper protocol */
686ebf28
UF
2660 l2cap_recv_acldata(conn, skb, flags);
2661 return;
1da177e4 2662 } else {
8e87d142 2663 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2664 hdev->name, handle);
2665 }
2666
2667 kfree_skb(skb);
2668}
2669
2670/* SCO data packet */
2671static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2672{
2673 struct hci_sco_hdr *hdr = (void *) skb->data;
2674 struct hci_conn *conn;
2675 __u16 handle;
2676
2677 skb_pull(skb, HCI_SCO_HDR_SIZE);
2678
2679 handle = __le16_to_cpu(hdr->handle);
2680
2681 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2682
2683 hdev->stat.sco_rx++;
2684
2685 hci_dev_lock(hdev);
2686 conn = hci_conn_hash_lookup_handle(hdev, handle);
2687 hci_dev_unlock(hdev);
2688
2689 if (conn) {
1da177e4 2690 /* Send to upper protocol */
686ebf28
UF
2691 sco_recv_scodata(conn, skb);
2692 return;
1da177e4 2693 } else {
8e87d142 2694 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2695 hdev->name, handle);
2696 }
2697
2698 kfree_skb(skb);
2699}
2700
/* RX worker: drains hdev->rx_q and dispatches each packet.
 *
 * The order of the checks is deliberate: monitoring sockets get their
 * copy first (promiscuous mode), raw-mode devices then drop everything,
 * data packets are discarded while the device is still initializing,
 * and finally the packet is routed by its type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw-mode device: userspace handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events still go through for device setup. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop */
			kfree_skb(skb);
			break;
		}
	}
}
2752
/* Command worker: transmits one queued HCI command when the controller
 * has command credit (cmd_cnt), keeping a clone in hdev->sent_cmd and
 * arming the command timeout timer.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Free the previously kept command clone, if any
		 * (kfree_skb on NULL is a no-op) */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone of the outgoing command; presumably it is
		 * consulted when the response arrives — the consumer is
		 * outside this file. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset no response timeout applies */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (out of memory): put the command
			 * back at the head and reschedule ourselves */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2783
2784int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2785{
2786 /* General inquiry access code (GIAC) */
2787 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2788 struct hci_cp_inquiry cp;
2789
2790 BT_DBG("%s", hdev->name);
2791
2792 if (test_bit(HCI_INQUIRY, &hdev->flags))
2793 return -EINPROGRESS;
2794
4663262c
JH
2795 inquiry_cache_flush(hdev);
2796
2519a1fc
AG
2797 memset(&cp, 0, sizeof(cp));
2798 memcpy(&cp.lap, lap, sizeof(cp.lap));
2799 cp.length = length;
2800
2801 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2802}
023d5049
AG
2803
2804int hci_cancel_inquiry(struct hci_dev *hdev)
2805{
2806 BT_DBG("%s", hdev->name);
2807
2808 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2809 return -EPERM;
2810
2811 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2812}
/* Module parameter: toggles High Speed support; writable at runtime
 * via sysfs (mode 0644). */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");