Bluetooth: LE scan should send Discovering events
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
56#define AUTO_OFF_TIMEOUT 2000
57
8b281b9c 58bool enable_hs;
7784d78f 59
b78752cc 60static void hci_rx_work(struct work_struct *work);
c347b765 61static void hci_cmd_work(struct work_struct *work);
3eff45ea 62static void hci_tx_work(struct work_struct *work);
1da177e4 63
1da177e4
LT
64/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
1da177e4 72/* HCI notifiers list */
e041c683 73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
74
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
e041c683 79 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
80}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
e041c683 84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
85}
86
6516455d 87static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 88{
e041c683 89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
90}
91
92/* ---- HCI requests ---- */
93
23bb5763 94void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 95{
23bb5763
JH
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
a5040efa
JH
98 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 102 return;
1da177e4
LT
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
8e87d142 123static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 124 unsigned long opt, __u32 timeout)
1da177e4
LT
125{
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
e175072f 146 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
3ff50b79 156 }
1da177e4 157
a5040efa 158 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 166 unsigned long opt, __u32 timeout)
1da177e4
LT
167{
168 int ret;
169
7c6a329e
MH
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
1da177e4
LT
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
181static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182{
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
f630cf0d 186 set_bit(HCI_RESET, &hdev->flags);
a9de9248 187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
188}
189
e61ef499 190static void bredr_init(struct hci_dev *hdev)
1da177e4 191{
b0916ea0 192 struct hci_cp_delete_stored_link_key cp;
1ebb9252 193 __le16 param;
89f2783d 194 __u8 flt_type;
1da177e4 195
2455a3ea
AE
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
1da177e4
LT
198 /* Mandatory initialization */
199
200 /* Reset */
f630cf0d 201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 204 }
1da177e4
LT
205
206 /* Read Local Supported Features */
a9de9248 207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 208
1143e5a6 209 /* Read Local Version */
a9de9248 210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 211
1da177e4 212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 214
1da177e4 215 /* Read BD Address */
a9de9248
MH
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
223
224 /* Read Voice Setting */
a9de9248 225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
89f2783d 230 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 232
1da177e4 233 /* Connection accept timeout ~20 secs */
aca3192c 234 param = cpu_to_le16(0x7d00);
a9de9248 235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
240}
241
e61ef499
AE
242static void amp_init(struct hci_dev *hdev)
243{
2455a3ea
AE
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
e61ef499
AE
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251}
252
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254{
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285}
286
6ed58ec5
VT
287static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288{
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293}
294
1da177e4
LT
295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
a9de9248 302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
a9de9248 312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
e4e8e37c 321 /* Encryption */
a9de9248 322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
323}
324
e4e8e37c
MH
325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
a418b893 329 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
8e87d142 335/* Get HCI device by index.
1da177e4
LT
336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
8035ded4 339 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
8035ded4 347 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
1da177e4
LT
356
357/* ---- Inquiry support ---- */
ff9ef578 358
30dc78e1
JH
359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
363 if (discov->state == DISCOVERY_INQUIRY ||
c599008f 364 discov->state == DISCOVERY_LE_SCAN ||
30dc78e1
JH
365 discov->state == DISCOVERY_RESOLVING)
366 return true;
367
368 return false;
369}
370
ff9ef578
JH
371void hci_discovery_set_state(struct hci_dev *hdev, int state)
372{
373 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
374
375 if (hdev->discovery.state == state)
376 return;
377
378 switch (state) {
379 case DISCOVERY_STOPPED:
380 mgmt_discovering(hdev, 0);
381 break;
382 case DISCOVERY_STARTING:
383 break;
30dc78e1 384 case DISCOVERY_INQUIRY:
c599008f 385 case DISCOVERY_LE_SCAN:
ff9ef578
JH
386 mgmt_discovering(hdev, 1);
387 break;
30dc78e1
JH
388 case DISCOVERY_RESOLVING:
389 break;
ff9ef578
JH
390 case DISCOVERY_STOPPING:
391 break;
392 }
393
394 hdev->discovery.state = state;
395}
396
1da177e4
LT
397static void inquiry_cache_flush(struct hci_dev *hdev)
398{
30883512 399 struct discovery_state *cache = &hdev->discovery;
b57c1a56 400 struct inquiry_entry *p, *n;
1da177e4 401
561aafbc
JH
402 list_for_each_entry_safe(p, n, &cache->all, all) {
403 list_del(&p->all);
b57c1a56 404 kfree(p);
1da177e4 405 }
561aafbc
JH
406
407 INIT_LIST_HEAD(&cache->unknown);
408 INIT_LIST_HEAD(&cache->resolve);
ff9ef578 409 cache->state = DISCOVERY_STOPPED;
1da177e4
LT
410}
411
412struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
413{
30883512 414 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
415 struct inquiry_entry *e;
416
417 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
418
561aafbc
JH
419 list_for_each_entry(e, &cache->all, all) {
420 if (!bacmp(&e->data.bdaddr, bdaddr))
421 return e;
422 }
423
424 return NULL;
425}
426
427struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
428 bdaddr_t *bdaddr)
429{
30883512 430 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
431 struct inquiry_entry *e;
432
433 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
434
435 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 436 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
437 return e;
438 }
439
440 return NULL;
1da177e4
LT
441}
442
30dc78e1
JH
443struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
444 bdaddr_t *bdaddr,
445 int state)
446{
447 struct discovery_state *cache = &hdev->discovery;
448 struct inquiry_entry *e;
449
450 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
451
452 list_for_each_entry(e, &cache->resolve, list) {
453 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
454 return e;
455 if (!bacmp(&e->data.bdaddr, bdaddr))
456 return e;
457 }
458
459 return NULL;
460}
461
a3d4e20a
JH
462void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
463 struct inquiry_entry *ie)
464{
465 struct discovery_state *cache = &hdev->discovery;
466 struct list_head *pos = &cache->resolve;
467 struct inquiry_entry *p;
468
469 list_del(&ie->list);
470
471 list_for_each_entry(p, &cache->resolve, list) {
472 if (p->name_state != NAME_PENDING &&
473 abs(p->data.rssi) >= abs(ie->data.rssi))
474 break;
475 pos = &p->list;
476 }
477
478 list_add(&ie->list, pos);
479}
480
3175405b 481bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
561aafbc 482 bool name_known)
1da177e4 483{
30883512 484 struct discovery_state *cache = &hdev->discovery;
70f23020 485 struct inquiry_entry *ie;
1da177e4
LT
486
487 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
488
70f23020 489 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a
JH
490 if (ie) {
491 if (ie->name_state == NAME_NEEDED &&
492 data->rssi != ie->data.rssi) {
493 ie->data.rssi = data->rssi;
494 hci_inquiry_cache_update_resolve(hdev, ie);
495 }
496
561aafbc 497 goto update;
a3d4e20a 498 }
561aafbc
JH
499
500 /* Entry not in the cache. Add new one. */
501 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
502 if (!ie)
3175405b 503 return false;
561aafbc
JH
504
505 list_add(&ie->all, &cache->all);
506
507 if (name_known) {
508 ie->name_state = NAME_KNOWN;
509 } else {
510 ie->name_state = NAME_NOT_KNOWN;
511 list_add(&ie->list, &cache->unknown);
512 }
70f23020 513
561aafbc
JH
514update:
515 if (name_known && ie->name_state != NAME_KNOWN &&
516 ie->name_state != NAME_PENDING) {
517 ie->name_state = NAME_KNOWN;
518 list_del(&ie->list);
1da177e4
LT
519 }
520
70f23020
AE
521 memcpy(&ie->data, data, sizeof(*data));
522 ie->timestamp = jiffies;
1da177e4 523 cache->timestamp = jiffies;
3175405b
JH
524
525 if (ie->name_state == NAME_NOT_KNOWN)
526 return false;
527
528 return true;
1da177e4
LT
529}
530
531static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
532{
30883512 533 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
534 struct inquiry_info *info = (struct inquiry_info *) buf;
535 struct inquiry_entry *e;
536 int copied = 0;
537
561aafbc 538 list_for_each_entry(e, &cache->all, all) {
1da177e4 539 struct inquiry_data *data = &e->data;
b57c1a56
JH
540
541 if (copied >= num)
542 break;
543
1da177e4
LT
544 bacpy(&info->bdaddr, &data->bdaddr);
545 info->pscan_rep_mode = data->pscan_rep_mode;
546 info->pscan_period_mode = data->pscan_period_mode;
547 info->pscan_mode = data->pscan_mode;
548 memcpy(info->dev_class, data->dev_class, 3);
549 info->clock_offset = data->clock_offset;
b57c1a56 550
1da177e4 551 info++;
b57c1a56 552 copied++;
1da177e4
LT
553 }
554
555 BT_DBG("cache %p, copied %d", cache, copied);
556 return copied;
557}
558
559static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
560{
561 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
562 struct hci_cp_inquiry cp;
563
564 BT_DBG("%s", hdev->name);
565
566 if (test_bit(HCI_INQUIRY, &hdev->flags))
567 return;
568
569 /* Start Inquiry */
570 memcpy(&cp.lap, &ir->lap, 3);
571 cp.length = ir->length;
572 cp.num_rsp = ir->num_rsp;
a9de9248 573 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
574}
575
576int hci_inquiry(void __user *arg)
577{
578 __u8 __user *ptr = arg;
579 struct hci_inquiry_req ir;
580 struct hci_dev *hdev;
581 int err = 0, do_inquiry = 0, max_rsp;
582 long timeo;
583 __u8 *buf;
584
585 if (copy_from_user(&ir, ptr, sizeof(ir)))
586 return -EFAULT;
587
5a08ecce
AE
588 hdev = hci_dev_get(ir.dev_id);
589 if (!hdev)
1da177e4
LT
590 return -ENODEV;
591
09fd0de5 592 hci_dev_lock(hdev);
8e87d142 593 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
594 inquiry_cache_empty(hdev) ||
595 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
596 inquiry_cache_flush(hdev);
597 do_inquiry = 1;
598 }
09fd0de5 599 hci_dev_unlock(hdev);
1da177e4 600
04837f64 601 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
602
603 if (do_inquiry) {
604 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
605 if (err < 0)
606 goto done;
607 }
1da177e4
LT
608
609 /* for unlimited number of responses we will use buffer with 255 entries */
610 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
611
612 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
613 * copy it to the user space.
614 */
01df8c31 615 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 616 if (!buf) {
1da177e4
LT
617 err = -ENOMEM;
618 goto done;
619 }
620
09fd0de5 621 hci_dev_lock(hdev);
1da177e4 622 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 623 hci_dev_unlock(hdev);
1da177e4
LT
624
625 BT_DBG("num_rsp %d", ir.num_rsp);
626
627 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
628 ptr += sizeof(ir);
629 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
630 ir.num_rsp))
631 err = -EFAULT;
8e87d142 632 } else
1da177e4
LT
633 err = -EFAULT;
634
635 kfree(buf);
636
637done:
638 hci_dev_put(hdev);
639 return err;
640}
641
642/* ---- HCI ioctl helpers ---- */
643
644int hci_dev_open(__u16 dev)
645{
646 struct hci_dev *hdev;
647 int ret = 0;
648
5a08ecce
AE
649 hdev = hci_dev_get(dev);
650 if (!hdev)
1da177e4
LT
651 return -ENODEV;
652
653 BT_DBG("%s %p", hdev->name, hdev);
654
655 hci_req_lock(hdev);
656
611b30f7
MH
657 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
658 ret = -ERFKILL;
659 goto done;
660 }
661
1da177e4
LT
662 if (test_bit(HCI_UP, &hdev->flags)) {
663 ret = -EALREADY;
664 goto done;
665 }
666
667 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
668 set_bit(HCI_RAW, &hdev->flags);
669
07e3b94a
AE
670 /* Treat all non BR/EDR controllers as raw devices if
671 enable_hs is not set */
672 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
673 set_bit(HCI_RAW, &hdev->flags);
674
1da177e4
LT
675 if (hdev->open(hdev)) {
676 ret = -EIO;
677 goto done;
678 }
679
680 if (!test_bit(HCI_RAW, &hdev->flags)) {
681 atomic_set(&hdev->cmd_cnt, 1);
682 set_bit(HCI_INIT, &hdev->flags);
a5040efa 683 hdev->init_last_cmd = 0;
1da177e4 684
04837f64
MH
685 ret = __hci_request(hdev, hci_init_req, 0,
686 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 687
eead27da 688 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
689 ret = __hci_request(hdev, hci_le_init_req, 0,
690 msecs_to_jiffies(HCI_INIT_TIMEOUT));
691
1da177e4
LT
692 clear_bit(HCI_INIT, &hdev->flags);
693 }
694
695 if (!ret) {
696 hci_dev_hold(hdev);
697 set_bit(HCI_UP, &hdev->flags);
698 hci_notify(hdev, HCI_DEV_UP);
a8b2d5c2 699 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 700 hci_dev_lock(hdev);
744cf19e 701 mgmt_powered(hdev, 1);
09fd0de5 702 hci_dev_unlock(hdev);
56e5cb86 703 }
8e87d142 704 } else {
1da177e4 705 /* Init failed, cleanup */
3eff45ea 706 flush_work(&hdev->tx_work);
c347b765 707 flush_work(&hdev->cmd_work);
b78752cc 708 flush_work(&hdev->rx_work);
1da177e4
LT
709
710 skb_queue_purge(&hdev->cmd_q);
711 skb_queue_purge(&hdev->rx_q);
712
713 if (hdev->flush)
714 hdev->flush(hdev);
715
716 if (hdev->sent_cmd) {
717 kfree_skb(hdev->sent_cmd);
718 hdev->sent_cmd = NULL;
719 }
720
721 hdev->close(hdev);
722 hdev->flags = 0;
723 }
724
725done:
726 hci_req_unlock(hdev);
727 hci_dev_put(hdev);
728 return ret;
729}
730
731static int hci_dev_do_close(struct hci_dev *hdev)
732{
733 BT_DBG("%s %p", hdev->name, hdev);
734
735 hci_req_cancel(hdev, ENODEV);
736 hci_req_lock(hdev);
737
738 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 739 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
740 hci_req_unlock(hdev);
741 return 0;
742 }
743
3eff45ea
GP
744 /* Flush RX and TX works */
745 flush_work(&hdev->tx_work);
b78752cc 746 flush_work(&hdev->rx_work);
1da177e4 747
16ab91ab 748 if (hdev->discov_timeout > 0) {
e0f9309f 749 cancel_delayed_work(&hdev->discov_off);
16ab91ab
JH
750 hdev->discov_timeout = 0;
751 }
752
a8b2d5c2 753 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 754 cancel_delayed_work(&hdev->power_off);
3243553f 755
a8b2d5c2 756 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
757 cancel_delayed_work(&hdev->service_cache);
758
09fd0de5 759 hci_dev_lock(hdev);
1da177e4
LT
760 inquiry_cache_flush(hdev);
761 hci_conn_hash_flush(hdev);
09fd0de5 762 hci_dev_unlock(hdev);
1da177e4
LT
763
764 hci_notify(hdev, HCI_DEV_DOWN);
765
766 if (hdev->flush)
767 hdev->flush(hdev);
768
769 /* Reset device */
770 skb_queue_purge(&hdev->cmd_q);
771 atomic_set(&hdev->cmd_cnt, 1);
8af59467
JH
772 if (!test_bit(HCI_RAW, &hdev->flags) &&
773 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
1da177e4 774 set_bit(HCI_INIT, &hdev->flags);
04837f64 775 __hci_request(hdev, hci_reset_req, 0,
cad44c2b 776 msecs_to_jiffies(250));
1da177e4
LT
777 clear_bit(HCI_INIT, &hdev->flags);
778 }
779
c347b765
GP
780 /* flush cmd work */
781 flush_work(&hdev->cmd_work);
1da177e4
LT
782
783 /* Drop queues */
784 skb_queue_purge(&hdev->rx_q);
785 skb_queue_purge(&hdev->cmd_q);
786 skb_queue_purge(&hdev->raw_q);
787
788 /* Drop last sent command */
789 if (hdev->sent_cmd) {
b79f44c1 790 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
791 kfree_skb(hdev->sent_cmd);
792 hdev->sent_cmd = NULL;
793 }
794
795 /* After this point our queues are empty
796 * and no tasks are scheduled. */
797 hdev->close(hdev);
798
09fd0de5 799 hci_dev_lock(hdev);
744cf19e 800 mgmt_powered(hdev, 0);
09fd0de5 801 hci_dev_unlock(hdev);
5add6af8 802
1da177e4
LT
803 /* Clear flags */
804 hdev->flags = 0;
805
806 hci_req_unlock(hdev);
807
808 hci_dev_put(hdev);
809 return 0;
810}
811
812int hci_dev_close(__u16 dev)
813{
814 struct hci_dev *hdev;
815 int err;
816
70f23020
AE
817 hdev = hci_dev_get(dev);
818 if (!hdev)
1da177e4
LT
819 return -ENODEV;
820 err = hci_dev_do_close(hdev);
821 hci_dev_put(hdev);
822 return err;
823}
824
825int hci_dev_reset(__u16 dev)
826{
827 struct hci_dev *hdev;
828 int ret = 0;
829
70f23020
AE
830 hdev = hci_dev_get(dev);
831 if (!hdev)
1da177e4
LT
832 return -ENODEV;
833
834 hci_req_lock(hdev);
1da177e4
LT
835
836 if (!test_bit(HCI_UP, &hdev->flags))
837 goto done;
838
839 /* Drop queues */
840 skb_queue_purge(&hdev->rx_q);
841 skb_queue_purge(&hdev->cmd_q);
842
09fd0de5 843 hci_dev_lock(hdev);
1da177e4
LT
844 inquiry_cache_flush(hdev);
845 hci_conn_hash_flush(hdev);
09fd0de5 846 hci_dev_unlock(hdev);
1da177e4
LT
847
848 if (hdev->flush)
849 hdev->flush(hdev);
850
8e87d142 851 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 852 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
853
854 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
855 ret = __hci_request(hdev, hci_reset_req, 0,
856 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
857
858done:
1da177e4
LT
859 hci_req_unlock(hdev);
860 hci_dev_put(hdev);
861 return ret;
862}
863
864int hci_dev_reset_stat(__u16 dev)
865{
866 struct hci_dev *hdev;
867 int ret = 0;
868
70f23020
AE
869 hdev = hci_dev_get(dev);
870 if (!hdev)
1da177e4
LT
871 return -ENODEV;
872
873 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
874
875 hci_dev_put(hdev);
876
877 return ret;
878}
879
880int hci_dev_cmd(unsigned int cmd, void __user *arg)
881{
882 struct hci_dev *hdev;
883 struct hci_dev_req dr;
884 int err = 0;
885
886 if (copy_from_user(&dr, arg, sizeof(dr)))
887 return -EFAULT;
888
70f23020
AE
889 hdev = hci_dev_get(dr.dev_id);
890 if (!hdev)
1da177e4
LT
891 return -ENODEV;
892
893 switch (cmd) {
894 case HCISETAUTH:
04837f64
MH
895 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
896 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
897 break;
898
899 case HCISETENCRYPT:
900 if (!lmp_encrypt_capable(hdev)) {
901 err = -EOPNOTSUPP;
902 break;
903 }
904
905 if (!test_bit(HCI_AUTH, &hdev->flags)) {
906 /* Auth must be enabled first */
04837f64
MH
907 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
908 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
909 if (err)
910 break;
911 }
912
04837f64
MH
913 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
914 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
915 break;
916
917 case HCISETSCAN:
04837f64
MH
918 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
919 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
920 break;
921
1da177e4 922 case HCISETLINKPOL:
e4e8e37c
MH
923 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
924 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
925 break;
926
927 case HCISETLINKMODE:
e4e8e37c
MH
928 hdev->link_mode = ((__u16) dr.dev_opt) &
929 (HCI_LM_MASTER | HCI_LM_ACCEPT);
930 break;
931
932 case HCISETPTYPE:
933 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
934 break;
935
936 case HCISETACLMTU:
e4e8e37c
MH
937 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
938 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
939 break;
940
941 case HCISETSCOMTU:
e4e8e37c
MH
942 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
943 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
944 break;
945
946 default:
947 err = -EINVAL;
948 break;
949 }
e4e8e37c 950
1da177e4
LT
951 hci_dev_put(hdev);
952 return err;
953}
954
955int hci_get_dev_list(void __user *arg)
956{
8035ded4 957 struct hci_dev *hdev;
1da177e4
LT
958 struct hci_dev_list_req *dl;
959 struct hci_dev_req *dr;
1da177e4
LT
960 int n = 0, size, err;
961 __u16 dev_num;
962
963 if (get_user(dev_num, (__u16 __user *) arg))
964 return -EFAULT;
965
966 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
967 return -EINVAL;
968
969 size = sizeof(*dl) + dev_num * sizeof(*dr);
970
70f23020
AE
971 dl = kzalloc(size, GFP_KERNEL);
972 if (!dl)
1da177e4
LT
973 return -ENOMEM;
974
975 dr = dl->dev_req;
976
f20d09d5 977 read_lock(&hci_dev_list_lock);
8035ded4 978 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 979 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 980 cancel_delayed_work(&hdev->power_off);
c542a06c 981
a8b2d5c2
JH
982 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
983 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 984
1da177e4
LT
985 (dr + n)->dev_id = hdev->id;
986 (dr + n)->dev_opt = hdev->flags;
c542a06c 987
1da177e4
LT
988 if (++n >= dev_num)
989 break;
990 }
f20d09d5 991 read_unlock(&hci_dev_list_lock);
1da177e4
LT
992
993 dl->dev_num = n;
994 size = sizeof(*dl) + n * sizeof(*dr);
995
996 err = copy_to_user(arg, dl, size);
997 kfree(dl);
998
999 return err ? -EFAULT : 0;
1000}
1001
1002int hci_get_dev_info(void __user *arg)
1003{
1004 struct hci_dev *hdev;
1005 struct hci_dev_info di;
1006 int err = 0;
1007
1008 if (copy_from_user(&di, arg, sizeof(di)))
1009 return -EFAULT;
1010
70f23020
AE
1011 hdev = hci_dev_get(di.dev_id);
1012 if (!hdev)
1da177e4
LT
1013 return -ENODEV;
1014
a8b2d5c2 1015 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1016 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1017
a8b2d5c2
JH
1018 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1019 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1020
1da177e4
LT
1021 strcpy(di.name, hdev->name);
1022 di.bdaddr = hdev->bdaddr;
943da25d 1023 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1024 di.flags = hdev->flags;
1025 di.pkt_type = hdev->pkt_type;
1026 di.acl_mtu = hdev->acl_mtu;
1027 di.acl_pkts = hdev->acl_pkts;
1028 di.sco_mtu = hdev->sco_mtu;
1029 di.sco_pkts = hdev->sco_pkts;
1030 di.link_policy = hdev->link_policy;
1031 di.link_mode = hdev->link_mode;
1032
1033 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1034 memcpy(&di.features, &hdev->features, sizeof(di.features));
1035
1036 if (copy_to_user(arg, &di, sizeof(di)))
1037 err = -EFAULT;
1038
1039 hci_dev_put(hdev);
1040
1041 return err;
1042}
1043
1044/* ---- Interface to HCI drivers ---- */
1045
611b30f7
MH
1046static int hci_rfkill_set_block(void *data, bool blocked)
1047{
1048 struct hci_dev *hdev = data;
1049
1050 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1051
1052 if (!blocked)
1053 return 0;
1054
1055 hci_dev_do_close(hdev);
1056
1057 return 0;
1058}
1059
1060static const struct rfkill_ops hci_rfkill_ops = {
1061 .set_block = hci_rfkill_set_block,
1062};
1063
1da177e4
LT
1064/* Alloc HCI device */
1065struct hci_dev *hci_alloc_dev(void)
1066{
1067 struct hci_dev *hdev;
1068
25ea6db0 1069 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1070 if (!hdev)
1071 return NULL;
1072
0ac7e700 1073 hci_init_sysfs(hdev);
1da177e4
LT
1074 skb_queue_head_init(&hdev->driver_init);
1075
1076 return hdev;
1077}
1078EXPORT_SYMBOL(hci_alloc_dev);
1079
1080/* Free HCI device */
1081void hci_free_dev(struct hci_dev *hdev)
1082{
1083 skb_queue_purge(&hdev->driver_init);
1084
a91f2e39
MH
1085 /* will free via device release */
1086 put_device(&hdev->dev);
1da177e4
LT
1087}
1088EXPORT_SYMBOL(hci_free_dev);
1089
ab81cbf9
JH
1090static void hci_power_on(struct work_struct *work)
1091{
1092 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1093
1094 BT_DBG("%s", hdev->name);
1095
1096 if (hci_dev_open(hdev->id) < 0)
1097 return;
1098
a8b2d5c2 1099 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
80b7ab33 1100 schedule_delayed_work(&hdev->power_off,
3243553f 1101 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9 1102
a8b2d5c2 1103 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1104 mgmt_index_added(hdev);
ab81cbf9
JH
1105}
1106
1107static void hci_power_off(struct work_struct *work)
1108{
3243553f
JH
1109 struct hci_dev *hdev = container_of(work, struct hci_dev,
1110 power_off.work);
ab81cbf9
JH
1111
1112 BT_DBG("%s", hdev->name);
1113
a8b2d5c2 1114 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ab81cbf9 1115
3243553f 1116 hci_dev_close(hdev->id);
ab81cbf9
JH
1117}
1118
16ab91ab
JH
1119static void hci_discov_off(struct work_struct *work)
1120{
1121 struct hci_dev *hdev;
1122 u8 scan = SCAN_PAGE;
1123
1124 hdev = container_of(work, struct hci_dev, discov_off.work);
1125
1126 BT_DBG("%s", hdev->name);
1127
09fd0de5 1128 hci_dev_lock(hdev);
16ab91ab
JH
1129
1130 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1131
1132 hdev->discov_timeout = 0;
1133
09fd0de5 1134 hci_dev_unlock(hdev);
16ab91ab
JH
1135}
1136
2aeb9a1a
JH
1137int hci_uuids_clear(struct hci_dev *hdev)
1138{
1139 struct list_head *p, *n;
1140
1141 list_for_each_safe(p, n, &hdev->uuids) {
1142 struct bt_uuid *uuid;
1143
1144 uuid = list_entry(p, struct bt_uuid, list);
1145
1146 list_del(p);
1147 kfree(uuid);
1148 }
1149
1150 return 0;
1151}
1152
55ed8ca1
JH
1153int hci_link_keys_clear(struct hci_dev *hdev)
1154{
1155 struct list_head *p, *n;
1156
1157 list_for_each_safe(p, n, &hdev->link_keys) {
1158 struct link_key *key;
1159
1160 key = list_entry(p, struct link_key, list);
1161
1162 list_del(p);
1163 kfree(key);
1164 }
1165
1166 return 0;
1167}
1168
b899efaf
VCG
1169int hci_smp_ltks_clear(struct hci_dev *hdev)
1170{
1171 struct smp_ltk *k, *tmp;
1172
1173 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1174 list_del(&k->list);
1175 kfree(k);
1176 }
1177
1178 return 0;
1179}
1180
55ed8ca1
JH
1181struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1182{
8035ded4 1183 struct link_key *k;
55ed8ca1 1184
8035ded4 1185 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1186 if (bacmp(bdaddr, &k->bdaddr) == 0)
1187 return k;
55ed8ca1
JH
1188
1189 return NULL;
1190}
1191
d25e28ab
JH
1192static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1193 u8 key_type, u8 old_key_type)
1194{
1195 /* Legacy key */
1196 if (key_type < 0x03)
1197 return 1;
1198
1199 /* Debug keys are insecure so don't store them persistently */
1200 if (key_type == HCI_LK_DEBUG_COMBINATION)
1201 return 0;
1202
1203 /* Changed combination key and there's no previous one */
1204 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1205 return 0;
1206
1207 /* Security mode 3 case */
1208 if (!conn)
1209 return 1;
1210
1211 /* Neither local nor remote side had no-bonding as requirement */
1212 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1213 return 1;
1214
1215 /* Local side had dedicated bonding as requirement */
1216 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1217 return 1;
1218
1219 /* Remote side had dedicated bonding as requirement */
1220 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1221 return 1;
1222
1223 /* If none of the above criteria match, then don't store the key
1224 * persistently */
1225 return 0;
1226}
1227
c9839a11 1228struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1229{
c9839a11 1230 struct smp_ltk *k;
75d262c2 1231
c9839a11
VCG
1232 list_for_each_entry(k, &hdev->long_term_keys, list) {
1233 if (k->ediv != ediv ||
1234 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1235 continue;
1236
c9839a11 1237 return k;
75d262c2
VCG
1238 }
1239
1240 return NULL;
1241}
1242EXPORT_SYMBOL(hci_find_ltk);
1243
c9839a11
VCG
1244struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1245 u8 addr_type)
75d262c2 1246{
c9839a11 1247 struct smp_ltk *k;
75d262c2 1248
c9839a11
VCG
1249 list_for_each_entry(k, &hdev->long_term_keys, list)
1250 if (addr_type == k->bdaddr_type &&
1251 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1252 return k;
1253
1254 return NULL;
1255}
c9839a11 1256EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1257
d25e28ab
JH
1258int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1259 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1260{
1261 struct link_key *key, *old_key;
4df378a1 1262 u8 old_key_type, persistent;
55ed8ca1
JH
1263
1264 old_key = hci_find_link_key(hdev, bdaddr);
1265 if (old_key) {
1266 old_key_type = old_key->type;
1267 key = old_key;
1268 } else {
12adcf3a 1269 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1270 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1271 if (!key)
1272 return -ENOMEM;
1273 list_add(&key->list, &hdev->link_keys);
1274 }
1275
1276 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1277
d25e28ab
JH
1278 /* Some buggy controller combinations generate a changed
1279 * combination key for legacy pairing even when there's no
1280 * previous key */
1281 if (type == HCI_LK_CHANGED_COMBINATION &&
1282 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1283 old_key_type == 0xff) {
d25e28ab 1284 type = HCI_LK_COMBINATION;
655fe6ec
JH
1285 if (conn)
1286 conn->key_type = type;
1287 }
d25e28ab 1288
55ed8ca1
JH
1289 bacpy(&key->bdaddr, bdaddr);
1290 memcpy(key->val, val, 16);
55ed8ca1
JH
1291 key->pin_len = pin_len;
1292
b6020ba0 1293 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1294 key->type = old_key_type;
4748fed2
JH
1295 else
1296 key->type = type;
1297
4df378a1
JH
1298 if (!new_key)
1299 return 0;
1300
1301 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1302
744cf19e 1303 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1304
1305 if (!persistent) {
1306 list_del(&key->list);
1307 kfree(key);
1308 }
55ed8ca1
JH
1309
1310 return 0;
1311}
1312
c9839a11
VCG
1313int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1314 int new_key, u8 authenticated, u8 tk[16],
1315 u8 enc_size, u16 ediv, u8 rand[8])
75d262c2 1316{
c9839a11 1317 struct smp_ltk *key, *old_key;
75d262c2 1318
c9839a11
VCG
1319 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1320 return 0;
75d262c2 1321
c9839a11
VCG
1322 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1323 if (old_key)
75d262c2 1324 key = old_key;
c9839a11
VCG
1325 else {
1326 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1327 if (!key)
1328 return -ENOMEM;
c9839a11 1329 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1330 }
1331
75d262c2 1332 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1333 key->bdaddr_type = addr_type;
1334 memcpy(key->val, tk, sizeof(key->val));
1335 key->authenticated = authenticated;
1336 key->ediv = ediv;
1337 key->enc_size = enc_size;
1338 key->type = type;
1339 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1340
c9839a11
VCG
1341 if (!new_key)
1342 return 0;
75d262c2 1343
261cc5aa
VCG
1344 if (type & HCI_SMP_LTK)
1345 mgmt_new_ltk(hdev, key, 1);
1346
75d262c2
VCG
1347 return 0;
1348}
1349
55ed8ca1
JH
1350int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1351{
1352 struct link_key *key;
1353
1354 key = hci_find_link_key(hdev, bdaddr);
1355 if (!key)
1356 return -ENOENT;
1357
1358 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1359
1360 list_del(&key->list);
1361 kfree(key);
1362
1363 return 0;
1364}
1365
b899efaf
VCG
1366int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1367{
1368 struct smp_ltk *k, *tmp;
1369
1370 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1371 if (bacmp(bdaddr, &k->bdaddr))
1372 continue;
1373
1374 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1375
1376 list_del(&k->list);
1377 kfree(k);
1378 }
1379
1380 return 0;
1381}
1382
6bd32326
VT
1383/* HCI command timer function */
1384static void hci_cmd_timer(unsigned long arg)
1385{
1386 struct hci_dev *hdev = (void *) arg;
1387
1388 BT_ERR("%s command tx timeout", hdev->name);
1389 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1390 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1391}
1392
2763eda6
SJ
1393struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1394 bdaddr_t *bdaddr)
1395{
1396 struct oob_data *data;
1397
1398 list_for_each_entry(data, &hdev->remote_oob_data, list)
1399 if (bacmp(bdaddr, &data->bdaddr) == 0)
1400 return data;
1401
1402 return NULL;
1403}
1404
1405int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1406{
1407 struct oob_data *data;
1408
1409 data = hci_find_remote_oob_data(hdev, bdaddr);
1410 if (!data)
1411 return -ENOENT;
1412
1413 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1414
1415 list_del(&data->list);
1416 kfree(data);
1417
1418 return 0;
1419}
1420
1421int hci_remote_oob_data_clear(struct hci_dev *hdev)
1422{
1423 struct oob_data *data, *n;
1424
1425 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1426 list_del(&data->list);
1427 kfree(data);
1428 }
1429
1430 return 0;
1431}
1432
1433int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1434 u8 *randomizer)
1435{
1436 struct oob_data *data;
1437
1438 data = hci_find_remote_oob_data(hdev, bdaddr);
1439
1440 if (!data) {
1441 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1442 if (!data)
1443 return -ENOMEM;
1444
1445 bacpy(&data->bdaddr, bdaddr);
1446 list_add(&data->list, &hdev->remote_oob_data);
1447 }
1448
1449 memcpy(data->hash, hash, sizeof(data->hash));
1450 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1451
1452 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1453
1454 return 0;
1455}
1456
b2a66aad
AJ
1457struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1458 bdaddr_t *bdaddr)
1459{
8035ded4 1460 struct bdaddr_list *b;
b2a66aad 1461
8035ded4 1462 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1463 if (bacmp(bdaddr, &b->bdaddr) == 0)
1464 return b;
b2a66aad
AJ
1465
1466 return NULL;
1467}
1468
1469int hci_blacklist_clear(struct hci_dev *hdev)
1470{
1471 struct list_head *p, *n;
1472
1473 list_for_each_safe(p, n, &hdev->blacklist) {
1474 struct bdaddr_list *b;
1475
1476 b = list_entry(p, struct bdaddr_list, list);
1477
1478 list_del(p);
1479 kfree(b);
1480 }
1481
1482 return 0;
1483}
1484
1485int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1486{
1487 struct bdaddr_list *entry;
b2a66aad
AJ
1488
1489 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1490 return -EBADF;
1491
5e762444
AJ
1492 if (hci_blacklist_lookup(hdev, bdaddr))
1493 return -EEXIST;
b2a66aad
AJ
1494
1495 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1496 if (!entry)
1497 return -ENOMEM;
b2a66aad
AJ
1498
1499 bacpy(&entry->bdaddr, bdaddr);
1500
1501 list_add(&entry->list, &hdev->blacklist);
1502
744cf19e 1503 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1504}
1505
1506int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1507{
1508 struct bdaddr_list *entry;
b2a66aad 1509
1ec918ce 1510 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1511 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1512
1513 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1514 if (!entry)
5e762444 1515 return -ENOENT;
b2a66aad
AJ
1516
1517 list_del(&entry->list);
1518 kfree(entry);
1519
744cf19e 1520 return mgmt_device_unblocked(hdev, bdaddr);
b2a66aad
AJ
1521}
1522
db323f2f 1523static void hci_clear_adv_cache(struct work_struct *work)
35815085 1524{
db323f2f
GP
1525 struct hci_dev *hdev = container_of(work, struct hci_dev,
1526 adv_work.work);
35815085
AG
1527
1528 hci_dev_lock(hdev);
1529
1530 hci_adv_entries_clear(hdev);
1531
1532 hci_dev_unlock(hdev);
1533}
1534
76c8686f
AG
1535int hci_adv_entries_clear(struct hci_dev *hdev)
1536{
1537 struct adv_entry *entry, *tmp;
1538
1539 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1540 list_del(&entry->list);
1541 kfree(entry);
1542 }
1543
1544 BT_DBG("%s adv cache cleared", hdev->name);
1545
1546 return 0;
1547}
1548
1549struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1550{
1551 struct adv_entry *entry;
1552
1553 list_for_each_entry(entry, &hdev->adv_entries, list)
1554 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1555 return entry;
1556
1557 return NULL;
1558}
1559
1560static inline int is_connectable_adv(u8 evt_type)
1561{
1562 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1563 return 1;
1564
1565 return 0;
1566}
1567
1568int hci_add_adv_entry(struct hci_dev *hdev,
1569 struct hci_ev_le_advertising_info *ev)
1570{
1571 struct adv_entry *entry;
1572
1573 if (!is_connectable_adv(ev->evt_type))
1574 return -EINVAL;
1575
1576 /* Only new entries should be added to adv_entries. So, if
1577 * bdaddr was found, don't add it. */
1578 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1579 return 0;
1580
4777bfde 1581 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
76c8686f
AG
1582 if (!entry)
1583 return -ENOMEM;
1584
1585 bacpy(&entry->bdaddr, &ev->bdaddr);
1586 entry->bdaddr_type = ev->bdaddr_type;
1587
1588 list_add(&entry->list, &hdev->adv_entries);
1589
1590 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1591 batostr(&entry->bdaddr), entry->bdaddr_type);
1592
1593 return 0;
1594}
1595
1da177e4
LT
1596/* Register HCI device */
1597int hci_register_dev(struct hci_dev *hdev)
1598{
1599 struct list_head *head = &hci_dev_list, *p;
08add513 1600 int i, id, error;
1da177e4 1601
e9b9cfa1 1602 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1603
010666a1 1604 if (!hdev->open || !hdev->close)
1da177e4
LT
1605 return -EINVAL;
1606
08add513
MM
1607 /* Do not allow HCI_AMP devices to register at index 0,
1608 * so the index can be used as the AMP controller ID.
1609 */
1610 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1611
f20d09d5 1612 write_lock(&hci_dev_list_lock);
1da177e4
LT
1613
1614 /* Find first available device id */
1615 list_for_each(p, &hci_dev_list) {
1616 if (list_entry(p, struct hci_dev, list)->id != id)
1617 break;
1618 head = p; id++;
1619 }
8e87d142 1620
1da177e4
LT
1621 sprintf(hdev->name, "hci%d", id);
1622 hdev->id = id;
c6feeb28 1623 list_add_tail(&hdev->list, head);
1da177e4 1624
09fd0de5 1625 mutex_init(&hdev->lock);
1da177e4
LT
1626
1627 hdev->flags = 0;
d23264a8 1628 hdev->dev_flags = 0;
1da177e4 1629 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1630 hdev->esco_type = (ESCO_HV1);
1da177e4 1631 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1632 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1633
04837f64
MH
1634 hdev->idle_timeout = 0;
1635 hdev->sniff_max_interval = 800;
1636 hdev->sniff_min_interval = 80;
1637
b78752cc 1638 INIT_WORK(&hdev->rx_work, hci_rx_work);
c347b765 1639 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3eff45ea 1640 INIT_WORK(&hdev->tx_work, hci_tx_work);
b78752cc 1641
1da177e4
LT
1642
1643 skb_queue_head_init(&hdev->rx_q);
1644 skb_queue_head_init(&hdev->cmd_q);
1645 skb_queue_head_init(&hdev->raw_q);
1646
6bd32326
VT
1647 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1648
cd4c5391 1649 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1650 hdev->reassembly[i] = NULL;
1651
1da177e4 1652 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1653 mutex_init(&hdev->req_lock);
1da177e4 1654
30883512 1655 discovery_init(hdev);
1da177e4
LT
1656
1657 hci_conn_hash_init(hdev);
1658
2e58ef3e
JH
1659 INIT_LIST_HEAD(&hdev->mgmt_pending);
1660
ea4bd8ba 1661 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1662
2aeb9a1a
JH
1663 INIT_LIST_HEAD(&hdev->uuids);
1664
55ed8ca1 1665 INIT_LIST_HEAD(&hdev->link_keys);
b899efaf 1666 INIT_LIST_HEAD(&hdev->long_term_keys);
55ed8ca1 1667
2763eda6
SJ
1668 INIT_LIST_HEAD(&hdev->remote_oob_data);
1669
76c8686f
AG
1670 INIT_LIST_HEAD(&hdev->adv_entries);
1671
db323f2f 1672 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
ab81cbf9 1673 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1674 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1675
16ab91ab
JH
1676 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1677
1da177e4
LT
1678 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1679
1680 atomic_set(&hdev->promisc, 0);
1681
f20d09d5 1682 write_unlock(&hci_dev_list_lock);
1da177e4 1683
32845eb1
GP
1684 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1685 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1686 if (!hdev->workqueue) {
1687 error = -ENOMEM;
1688 goto err;
1689 }
f48fd9c8 1690
33ca954d
DH
1691 error = hci_add_sysfs(hdev);
1692 if (error < 0)
1693 goto err_wqueue;
1da177e4 1694
611b30f7
MH
1695 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1696 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1697 if (hdev->rfkill) {
1698 if (rfkill_register(hdev->rfkill) < 0) {
1699 rfkill_destroy(hdev->rfkill);
1700 hdev->rfkill = NULL;
1701 }
1702 }
1703
a8b2d5c2
JH
1704 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1705 set_bit(HCI_SETUP, &hdev->dev_flags);
7f971041 1706 schedule_work(&hdev->power_on);
ab81cbf9 1707
1da177e4 1708 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 1709 hci_dev_hold(hdev);
1da177e4
LT
1710
1711 return id;
f48fd9c8 1712
33ca954d
DH
1713err_wqueue:
1714 destroy_workqueue(hdev->workqueue);
1715err:
f20d09d5 1716 write_lock(&hci_dev_list_lock);
f48fd9c8 1717 list_del(&hdev->list);
f20d09d5 1718 write_unlock(&hci_dev_list_lock);
f48fd9c8 1719
33ca954d 1720 return error;
1da177e4
LT
1721}
1722EXPORT_SYMBOL(hci_register_dev);
1723
1724/* Unregister HCI device */
59735631 1725void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1726{
ef222013
MH
1727 int i;
1728
c13854ce 1729 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1730
f20d09d5 1731 write_lock(&hci_dev_list_lock);
1da177e4 1732 list_del(&hdev->list);
f20d09d5 1733 write_unlock(&hci_dev_list_lock);
1da177e4
LT
1734
1735 hci_dev_do_close(hdev);
1736
cd4c5391 1737 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1738 kfree_skb(hdev->reassembly[i]);
1739
ab81cbf9 1740 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8b2d5c2 1741 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 1742 hci_dev_lock(hdev);
744cf19e 1743 mgmt_index_removed(hdev);
09fd0de5 1744 hci_dev_unlock(hdev);
56e5cb86 1745 }
ab81cbf9 1746
2e58ef3e
JH
1747 /* mgmt_index_removed should take care of emptying the
1748 * pending list */
1749 BUG_ON(!list_empty(&hdev->mgmt_pending));
1750
1da177e4
LT
1751 hci_notify(hdev, HCI_DEV_UNREG);
1752
611b30f7
MH
1753 if (hdev->rfkill) {
1754 rfkill_unregister(hdev->rfkill);
1755 rfkill_destroy(hdev->rfkill);
1756 }
1757
ce242970 1758 hci_del_sysfs(hdev);
147e2d59 1759
db323f2f 1760 cancel_delayed_work_sync(&hdev->adv_work);
c6f3c5f7 1761
f48fd9c8
MH
1762 destroy_workqueue(hdev->workqueue);
1763
09fd0de5 1764 hci_dev_lock(hdev);
e2e0cacb 1765 hci_blacklist_clear(hdev);
2aeb9a1a 1766 hci_uuids_clear(hdev);
55ed8ca1 1767 hci_link_keys_clear(hdev);
b899efaf 1768 hci_smp_ltks_clear(hdev);
2763eda6 1769 hci_remote_oob_data_clear(hdev);
76c8686f 1770 hci_adv_entries_clear(hdev);
09fd0de5 1771 hci_dev_unlock(hdev);
e2e0cacb 1772
dc946bd8 1773 hci_dev_put(hdev);
1da177e4
LT
1774}
1775EXPORT_SYMBOL(hci_unregister_dev);
1776
1777/* Suspend HCI device */
1778int hci_suspend_dev(struct hci_dev *hdev)
1779{
1780 hci_notify(hdev, HCI_DEV_SUSPEND);
1781 return 0;
1782}
1783EXPORT_SYMBOL(hci_suspend_dev);
1784
1785/* Resume HCI device */
1786int hci_resume_dev(struct hci_dev *hdev)
1787{
1788 hci_notify(hdev, HCI_DEV_RESUME);
1789 return 0;
1790}
1791EXPORT_SYMBOL(hci_resume_dev);
1792
76bca880
MH
1793/* Receive frame from HCI drivers */
1794int hci_recv_frame(struct sk_buff *skb)
1795{
1796 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1797 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1798 && !test_bit(HCI_INIT, &hdev->flags))) {
1799 kfree_skb(skb);
1800 return -ENXIO;
1801 }
1802
1803 /* Incomming skb */
1804 bt_cb(skb)->incoming = 1;
1805
1806 /* Time stamp */
1807 __net_timestamp(skb);
1808
76bca880 1809 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1810 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1811
76bca880
MH
1812 return 0;
1813}
1814EXPORT_SYMBOL(hci_recv_frame);
1815
33e882a5 1816static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1817 int count, __u8 index)
33e882a5
SS
1818{
1819 int len = 0;
1820 int hlen = 0;
1821 int remain = count;
1822 struct sk_buff *skb;
1823 struct bt_skb_cb *scb;
1824
1825 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1826 index >= NUM_REASSEMBLY)
1827 return -EILSEQ;
1828
1829 skb = hdev->reassembly[index];
1830
1831 if (!skb) {
1832 switch (type) {
1833 case HCI_ACLDATA_PKT:
1834 len = HCI_MAX_FRAME_SIZE;
1835 hlen = HCI_ACL_HDR_SIZE;
1836 break;
1837 case HCI_EVENT_PKT:
1838 len = HCI_MAX_EVENT_SIZE;
1839 hlen = HCI_EVENT_HDR_SIZE;
1840 break;
1841 case HCI_SCODATA_PKT:
1842 len = HCI_MAX_SCO_SIZE;
1843 hlen = HCI_SCO_HDR_SIZE;
1844 break;
1845 }
1846
1e429f38 1847 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1848 if (!skb)
1849 return -ENOMEM;
1850
1851 scb = (void *) skb->cb;
1852 scb->expect = hlen;
1853 scb->pkt_type = type;
1854
1855 skb->dev = (void *) hdev;
1856 hdev->reassembly[index] = skb;
1857 }
1858
1859 while (count) {
1860 scb = (void *) skb->cb;
1861 len = min(scb->expect, (__u16)count);
1862
1863 memcpy(skb_put(skb, len), data, len);
1864
1865 count -= len;
1866 data += len;
1867 scb->expect -= len;
1868 remain = count;
1869
1870 switch (type) {
1871 case HCI_EVENT_PKT:
1872 if (skb->len == HCI_EVENT_HDR_SIZE) {
1873 struct hci_event_hdr *h = hci_event_hdr(skb);
1874 scb->expect = h->plen;
1875
1876 if (skb_tailroom(skb) < scb->expect) {
1877 kfree_skb(skb);
1878 hdev->reassembly[index] = NULL;
1879 return -ENOMEM;
1880 }
1881 }
1882 break;
1883
1884 case HCI_ACLDATA_PKT:
1885 if (skb->len == HCI_ACL_HDR_SIZE) {
1886 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1887 scb->expect = __le16_to_cpu(h->dlen);
1888
1889 if (skb_tailroom(skb) < scb->expect) {
1890 kfree_skb(skb);
1891 hdev->reassembly[index] = NULL;
1892 return -ENOMEM;
1893 }
1894 }
1895 break;
1896
1897 case HCI_SCODATA_PKT:
1898 if (skb->len == HCI_SCO_HDR_SIZE) {
1899 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1900 scb->expect = h->dlen;
1901
1902 if (skb_tailroom(skb) < scb->expect) {
1903 kfree_skb(skb);
1904 hdev->reassembly[index] = NULL;
1905 return -ENOMEM;
1906 }
1907 }
1908 break;
1909 }
1910
1911 if (scb->expect == 0) {
1912 /* Complete frame */
1913
1914 bt_cb(skb)->pkt_type = type;
1915 hci_recv_frame(skb);
1916
1917 hdev->reassembly[index] = NULL;
1918 return remain;
1919 }
1920 }
1921
1922 return remain;
1923}
1924
ef222013
MH
1925int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1926{
f39a3c06
SS
1927 int rem = 0;
1928
ef222013
MH
1929 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1930 return -EILSEQ;
1931
da5f6c37 1932 while (count) {
1e429f38 1933 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1934 if (rem < 0)
1935 return rem;
ef222013 1936
f39a3c06
SS
1937 data += (count - rem);
1938 count = rem;
f81c6224 1939 }
ef222013 1940
f39a3c06 1941 return rem;
ef222013
MH
1942}
1943EXPORT_SYMBOL(hci_recv_fragment);
1944
99811510
SS
1945#define STREAM_REASSEMBLY 0
1946
1947int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1948{
1949 int type;
1950 int rem = 0;
1951
da5f6c37 1952 while (count) {
99811510
SS
1953 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1954
1955 if (!skb) {
1956 struct { char type; } *pkt;
1957
1958 /* Start of the frame */
1959 pkt = data;
1960 type = pkt->type;
1961
1962 data++;
1963 count--;
1964 } else
1965 type = bt_cb(skb)->pkt_type;
1966
1e429f38
GP
1967 rem = hci_reassembly(hdev, type, data, count,
1968 STREAM_REASSEMBLY);
99811510
SS
1969 if (rem < 0)
1970 return rem;
1971
1972 data += (count - rem);
1973 count = rem;
f81c6224 1974 }
99811510
SS
1975
1976 return rem;
1977}
1978EXPORT_SYMBOL(hci_recv_stream_fragment);
1979
1da177e4
LT
1980/* ---- Interface to upper protocols ---- */
1981
1da177e4
LT
1982int hci_register_cb(struct hci_cb *cb)
1983{
1984 BT_DBG("%p name %s", cb, cb->name);
1985
f20d09d5 1986 write_lock(&hci_cb_list_lock);
1da177e4 1987 list_add(&cb->list, &hci_cb_list);
f20d09d5 1988 write_unlock(&hci_cb_list_lock);
1da177e4
LT
1989
1990 return 0;
1991}
1992EXPORT_SYMBOL(hci_register_cb);
1993
1994int hci_unregister_cb(struct hci_cb *cb)
1995{
1996 BT_DBG("%p name %s", cb, cb->name);
1997
f20d09d5 1998 write_lock(&hci_cb_list_lock);
1da177e4 1999 list_del(&cb->list);
f20d09d5 2000 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2001
2002 return 0;
2003}
2004EXPORT_SYMBOL(hci_unregister_cb);
2005
2006static int hci_send_frame(struct sk_buff *skb)
2007{
2008 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2009
2010 if (!hdev) {
2011 kfree_skb(skb);
2012 return -ENODEV;
2013 }
2014
0d48d939 2015 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4
LT
2016
2017 if (atomic_read(&hdev->promisc)) {
2018 /* Time stamp */
a61bbcf2 2019 __net_timestamp(skb);
1da177e4 2020
eec8d2bc 2021 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2022 }
2023
2024 /* Get rid of skb owner, prior to sending to the driver. */
2025 skb_orphan(skb);
2026
2027 return hdev->send(skb);
2028}
2029
2030/* Send HCI command */
a9de9248 2031int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
2032{
2033 int len = HCI_COMMAND_HDR_SIZE + plen;
2034 struct hci_command_hdr *hdr;
2035 struct sk_buff *skb;
2036
a9de9248 2037 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
2038
2039 skb = bt_skb_alloc(len, GFP_ATOMIC);
2040 if (!skb) {
ef222013 2041 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
2042 return -ENOMEM;
2043 }
2044
2045 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2046 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2047 hdr->plen = plen;
2048
2049 if (plen)
2050 memcpy(skb_put(skb, plen), param, plen);
2051
2052 BT_DBG("skb len %d", skb->len);
2053
0d48d939 2054 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2055 skb->dev = (void *) hdev;
c78ae283 2056
a5040efa
JH
2057 if (test_bit(HCI_INIT, &hdev->flags))
2058 hdev->init_last_cmd = opcode;
2059
1da177e4 2060 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2061 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2062
2063 return 0;
2064}
1da177e4
LT
2065
2066/* Get data from the previously sent command */
a9de9248 2067void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2068{
2069 struct hci_command_hdr *hdr;
2070
2071 if (!hdev->sent_cmd)
2072 return NULL;
2073
2074 hdr = (void *) hdev->sent_cmd->data;
2075
a9de9248 2076 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2077 return NULL;
2078
a9de9248 2079 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2080
2081 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2082}
2083
2084/* Send ACL data */
2085static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2086{
2087 struct hci_acl_hdr *hdr;
2088 int len = skb->len;
2089
badff6d0
ACM
2090 skb_push(skb, HCI_ACL_HDR_SIZE);
2091 skb_reset_transport_header(skb);
9c70220b 2092 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2093 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2094 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2095}
2096
73d80deb
LAD
2097static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2098 struct sk_buff *skb, __u16 flags)
1da177e4
LT
2099{
2100 struct hci_dev *hdev = conn->hdev;
2101 struct sk_buff *list;
2102
70f23020
AE
2103 list = skb_shinfo(skb)->frag_list;
2104 if (!list) {
1da177e4
LT
2105 /* Non fragmented */
2106 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2107
73d80deb 2108 skb_queue_tail(queue, skb);
1da177e4
LT
2109 } else {
2110 /* Fragmented */
2111 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2112
2113 skb_shinfo(skb)->frag_list = NULL;
2114
2115 /* Queue all fragments atomically */
af3e6359 2116 spin_lock(&queue->lock);
1da177e4 2117
73d80deb 2118 __skb_queue_tail(queue, skb);
e702112f
AE
2119
2120 flags &= ~ACL_START;
2121 flags |= ACL_CONT;
1da177e4
LT
2122 do {
2123 skb = list; list = list->next;
8e87d142 2124
1da177e4 2125 skb->dev = (void *) hdev;
0d48d939 2126 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2127 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2128
2129 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2130
73d80deb 2131 __skb_queue_tail(queue, skb);
1da177e4
LT
2132 } while (list);
2133
af3e6359 2134 spin_unlock(&queue->lock);
1da177e4 2135 }
73d80deb
LAD
2136}
2137
2138void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2139{
2140 struct hci_conn *conn = chan->conn;
2141 struct hci_dev *hdev = conn->hdev;
2142
2143 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2144
2145 skb->dev = (void *) hdev;
2146 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2147 hci_add_acl_hdr(skb, conn->handle, flags);
2148
2149 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2150
3eff45ea 2151 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2152}
2153EXPORT_SYMBOL(hci_send_acl);
2154
2155/* Send SCO data */
0d861d8b 2156void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2157{
2158 struct hci_dev *hdev = conn->hdev;
2159 struct hci_sco_hdr hdr;
2160
2161 BT_DBG("%s len %d", hdev->name, skb->len);
2162
aca3192c 2163 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2164 hdr.dlen = skb->len;
2165
badff6d0
ACM
2166 skb_push(skb, HCI_SCO_HDR_SIZE);
2167 skb_reset_transport_header(skb);
9c70220b 2168 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2169
2170 skb->dev = (void *) hdev;
0d48d939 2171 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2172
1da177e4 2173 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2174 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2175}
2176EXPORT_SYMBOL(hci_send_sco);
2177
2178/* ---- HCI TX task (outgoing data) ---- */
2179
2180/* HCI Connection scheduler */
2181static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2182{
2183 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2184 struct hci_conn *conn = NULL, *c;
1da177e4 2185 int num = 0, min = ~0;
1da177e4 2186
8e87d142 2187 /* We don't have to lock device here. Connections are always
1da177e4 2188 * added and removed with TX task disabled. */
bf4c6325
GP
2189
2190 rcu_read_lock();
2191
2192 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2193 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2194 continue;
769be974
MH
2195
2196 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2197 continue;
2198
1da177e4
LT
2199 num++;
2200
2201 if (c->sent < min) {
2202 min = c->sent;
2203 conn = c;
2204 }
52087a79
LAD
2205
2206 if (hci_conn_num(hdev, type) == num)
2207 break;
1da177e4
LT
2208 }
2209
bf4c6325
GP
2210 rcu_read_unlock();
2211
1da177e4 2212 if (conn) {
6ed58ec5
VT
2213 int cnt, q;
2214
2215 switch (conn->type) {
2216 case ACL_LINK:
2217 cnt = hdev->acl_cnt;
2218 break;
2219 case SCO_LINK:
2220 case ESCO_LINK:
2221 cnt = hdev->sco_cnt;
2222 break;
2223 case LE_LINK:
2224 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2225 break;
2226 default:
2227 cnt = 0;
2228 BT_ERR("Unknown link type");
2229 }
2230
2231 q = cnt / num;
1da177e4
LT
2232 *quote = q ? q : 1;
2233 } else
2234 *quote = 0;
2235
2236 BT_DBG("conn %p quote %d", conn, *quote);
2237 return conn;
2238}
2239
bae1f5d9 2240static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2241{
2242 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2243 struct hci_conn *c;
1da177e4 2244
bae1f5d9 2245 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2246
bf4c6325
GP
2247 rcu_read_lock();
2248
1da177e4 2249 /* Kill stalled connections */
bf4c6325 2250 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2251 if (c->type == type && c->sent) {
2252 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2253 hdev->name, batostr(&c->dst));
2254 hci_acl_disconn(c, 0x13);
2255 }
2256 }
bf4c6325
GP
2257
2258 rcu_read_unlock();
1da177e4
LT
2259}
2260
73d80deb
LAD
/* Pick the best channel of the given link type to transmit on.
 *
 * Over all connected links of @type, only the skb at the head of each
 * channel queue is considered.  The channel with the highest head
 * priority wins; among channels tied at that priority, the one whose
 * connection has the fewest packets in flight is chosen (fairness).
 * *quote is set to this round's packet budget: the free controller
 * buffers divided evenly across the contending channels, minimum 1.
 * Returns NULL when no channel has anything queued.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority level found: restart the
				 * fairness comparison at this level */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Fairness tie-break: fewest packets in flight */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Size the quote from the buffer pool of the winning link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE falls back to the ACL pool when the controller
		 * reports no dedicated LE buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the available buffers evenly; everyone gets at least 1 */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2339
02b20f0b
LAD
/* Priority aging for the given link type, run after a TX round in which
 * packets were sent.
 *
 * For every channel of @type: a channel that transmitted this round
 * (chan->sent != 0) merely has its round counter cleared, while a
 * channel that was starved has its head skb promoted to
 * HCI_PRIO_MAX - 1 so it wins the next hci_chan_sent() selection.
 * Only the head skb is touched; queued packets behind it keep their
 * original priority.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset the
			 * per-round counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: promote its head skb */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2389
b71d385a
AE
/* Number of controller buffer blocks consumed by one ACL data packet
 * in block-based flow control mode.  Only the payload counts against
 * the block pool; the HCI ACL header is excluded. */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2395
/* Detect a stalled ACL link: if no buffer credits remain (@cnt == 0)
 * and nothing has been transmitted for HCI_ACL_TX_TIMEOUT, kill the
 * stalled connections via hci_link_tx_to().  Skipped entirely for
 * HCI_RAW devices. */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 2406
63d2bc1b
AE
/* Packet-based ACL scheduler.  While ACL buffer credits remain, pick
 * the best channel via hci_chan_sent() and send up to its quote of
 * packets, stopping the inner loop early if the head-of-queue priority
 * drops below the priority the quote was granted at.  If anything was
 * sent, run priority aging so starved channels get promoted. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Bring the link to active mode if the packet's
			 * control block demands it */
			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit consumed; track per-channel and
			 * per-connection in-flight counts */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2444
b71d385a
AE
2445static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2446{
63d2bc1b 2447 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2448 struct hci_chan *chan;
2449 struct sk_buff *skb;
2450 int quote;
b71d385a 2451
63d2bc1b 2452 __check_timeout(hdev, cnt);
b71d385a
AE
2453
2454 while (hdev->block_cnt > 0 &&
2455 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2456 u32 priority = (skb_peek(&chan->data_q))->priority;
2457 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2458 int blocks;
2459
2460 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2461 skb->len, skb->priority);
2462
2463 /* Stop if priority has changed */
2464 if (skb->priority < priority)
2465 break;
2466
2467 skb = skb_dequeue(&chan->data_q);
2468
2469 blocks = __get_blocks(hdev, skb);
2470 if (blocks > hdev->block_cnt)
2471 return;
2472
2473 hci_conn_enter_active_mode(chan->conn,
2474 bt_cb(skb)->force_active);
2475
2476 hci_send_frame(skb);
2477 hdev->acl_last_tx = jiffies;
2478
2479 hdev->block_cnt -= blocks;
2480 quote -= blocks;
2481
2482 chan->sent += blocks;
2483 chan->conn->sent += blocks;
2484 }
2485 }
2486
2487 if (cnt != hdev->block_cnt)
2488 hci_prio_recalculate(hdev, ACL_LINK);
2489}
2490
2491static inline void hci_sched_acl(struct hci_dev *hdev)
2492{
2493 BT_DBG("%s", hdev->name);
2494
2495 if (!hci_conn_num(hdev, ACL_LINK))
2496 return;
2497
2498 switch (hdev->flow_ctl_mode) {
2499 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2500 hci_sched_acl_pkt(hdev);
2501 break;
2502
2503 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2504 hci_sched_acl_blk(hdev);
2505 break;
2506 }
2507}
2508
1da177e4
LT
2509/* Schedule SCO */
2510static inline void hci_sched_sco(struct hci_dev *hdev)
2511{
2512 struct hci_conn *conn;
2513 struct sk_buff *skb;
2514 int quote;
2515
2516 BT_DBG("%s", hdev->name);
2517
52087a79
LAD
2518 if (!hci_conn_num(hdev, SCO_LINK))
2519 return;
2520
1da177e4
LT
2521 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2522 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2523 BT_DBG("skb %p len %d", skb, skb->len);
2524 hci_send_frame(skb);
2525
2526 conn->sent++;
2527 if (conn->sent == ~0)
2528 conn->sent = 0;
2529 }
2530 }
2531}
2532
b6a0dc82
MH
2533static inline void hci_sched_esco(struct hci_dev *hdev)
2534{
2535 struct hci_conn *conn;
2536 struct sk_buff *skb;
2537 int quote;
2538
2539 BT_DBG("%s", hdev->name);
2540
52087a79
LAD
2541 if (!hci_conn_num(hdev, ESCO_LINK))
2542 return;
2543
b6a0dc82
MH
2544 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2545 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2546 BT_DBG("skb %p len %d", skb, skb->len);
2547 hci_send_frame(skb);
2548
2549 conn->sent++;
2550 if (conn->sent == ~0)
2551 conn->sent = 0;
2552 }
2553 }
2554}
2555
6ed58ec5
VT
/* LE scheduler.  Mirrors hci_sched_acl_pkt(), with two differences:
 * LE draws from the shared ACL pool when the controller reports no
 * dedicated LE buffers (le_pkts == 0), and the tx-timeout check is
 * done inline rather than via __check_timeout(). */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): the hardcoded HZ * 45 duplicates the ACL
		 * timeout logic in __check_timeout(); consider sharing it */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL pool when there are no LE buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write remaining credits back to whichever pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2606
/* TX work item (hdev->tx_work): drains the outbound queues toward the
 * driver in fixed order -- ACL, SCO, eSCO, LE -- then flushes any raw
 * (unknown type) packets from hdev->raw_q. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2629
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet: strip the ACL header, look up the connection by
 * handle and hand the payload to L2CAP.  On the known-connection path
 * the skb is consumed by l2cap_recv_acldata() (not freed here);
 * otherwise it is dropped. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The on-wire handle field packs the connection handle together
	 * with the packet boundary/broadcast flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2666
2667/* SCO data packet */
2668static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2669{
2670 struct hci_sco_hdr *hdr = (void *) skb->data;
2671 struct hci_conn *conn;
2672 __u16 handle;
2673
2674 skb_pull(skb, HCI_SCO_HDR_SIZE);
2675
2676 handle = __le16_to_cpu(hdr->handle);
2677
2678 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2679
2680 hdev->stat.sco_rx++;
2681
2682 hci_dev_lock(hdev);
2683 conn = hci_conn_hash_lookup_handle(hdev, handle);
2684 hci_dev_unlock(hdev);
2685
2686 if (conn) {
1da177e4 2687 /* Send to upper protocol */
686ebf28
UF
2688 sco_recv_scodata(conn, skb);
2689 return;
1da177e4 2690 } else {
8e87d142 2691 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2692 hdev->name, handle);
2693 }
2694
2695 kfree_skb(skb);
2696}
2697
/* RX work item (hdev->rx_work): processes every frame queued on
 * hdev->rx_q.  Each frame is optionally copied to HCI sockets
 * (promiscuous monitoring), dropped entirely for HCI_RAW devices,
 * dropped if it is a data packet while the device is still in
 * HCI_INIT, and otherwise dispatched by packet type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw device: the stack does not process frames itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2749
/* Command work item (hdev->cmd_work): when the controller has a
 * command credit (cmd_cnt), sends the next queued HCI command, keeps a
 * clone in hdev->sent_cmd so the matching command-complete/status
 * event can reference it, and arms the command timeout.  If cloning
 * fails the command is requeued and the work rescheduled. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is in flight */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (OOM): put the command back and
			 * retry on the next work run */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2780
2781int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2782{
2783 /* General inquiry access code (GIAC) */
2784 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2785 struct hci_cp_inquiry cp;
2786
2787 BT_DBG("%s", hdev->name);
2788
2789 if (test_bit(HCI_INQUIRY, &hdev->flags))
2790 return -EINPROGRESS;
2791
4663262c
JH
2792 inquiry_cache_flush(hdev);
2793
2519a1fc
AG
2794 memset(&cp, 0, sizeof(cp));
2795 memcpy(&cp.lap, lap, sizeof(cp.lap));
2796 cp.length = length;
2797
2798 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2799}
023d5049
AG
2800
2801int hci_cancel_inquiry(struct hci_dev *hdev)
2802{
2803 BT_DBG("%s", hdev->name);
2804
2805 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2806 return -EPERM;
2807
2808 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2809}
7784d78f
AE
2810
/* Expose the High Speed support toggle as a writable module parameter
 * (mode 0644) under /sys/module/.../parameters/enable_hs. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");