Bluetooth: Remove HCI notifier handling
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4 42#include <linux/interrupt.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
ab81cbf9
JH
55#define AUTO_OFF_TIMEOUT 2000
56
8b281b9c 57bool enable_hs;
7784d78f 58
b78752cc 59static void hci_rx_work(struct work_struct *work);
c347b765 60static void hci_cmd_work(struct work_struct *work);
3eff45ea 61static void hci_tx_work(struct work_struct *work);
1da177e4 62
1da177e4
LT
63/* HCI device list */
64LIST_HEAD(hci_dev_list);
65DEFINE_RWLOCK(hci_dev_list_lock);
66
67/* HCI callback list */
68LIST_HEAD(hci_cb_list);
69DEFINE_RWLOCK(hci_cb_list_lock);
70
1da177e4
LT
71/* ---- HCI notifications ---- */
72
/* Pass a device event on to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
77
78/* ---- HCI requests ---- */
79
/* Record the result of a completed HCI command and wake any synchronous
 * __hci_request() waiter. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
96
97static void hci_req_cancel(struct hci_dev *hdev, int err)
98{
99 BT_DBG("%s err 0x%2.2x", hdev->name, err);
100
101 if (hdev->req_status == HCI_REQ_PEND) {
102 hdev->req_result = err;
103 hdev->req_status = HCI_REQ_CANCELED;
104 wake_up_interruptible(&hdev->req_wait_q);
105 }
106}
107
/* Execute request and wait for completion.
 *
 * Runs the supplied request callback and sleeps (interruptibly, up to
 * @timeout jiffies) until hci_req_complete()/hci_req_cancel() wakes us
 * or the timeout expires.  Returns 0 or a negative errno. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue before issuing the request so a fast
	 * completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on the signal path req_status/req_result are not
	 * reset to 0 as they are below - confirm callers tolerate that. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Convert the HCI status code into a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the timeout expired */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
150
151static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 152 unsigned long opt, __u32 timeout)
1da177e4
LT
153{
154 int ret;
155
7c6a329e
MH
156 if (!test_bit(HCI_UP, &hdev->flags))
157 return -ENETDOWN;
158
1da177e4
LT
159 /* Serialize all requests */
160 hci_req_lock(hdev);
161 ret = __hci_request(hdev, req, opt, timeout);
162 hci_req_unlock(hdev);
163
164 return ret;
165}
166
/* Request callback: issue an HCI Reset, marking HCI_RESET so the event
 * handler knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
175
/* Queue the initial command sequence for a BR/EDR controller: reset
 * (unless the driver's quirk forbids it), read static controller
 * information, then apply optional setup (event filter, connection
 * accept timeout, stored link key flush). */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Flush all stored link keys (BDADDR_ANY + delete_all) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
227
e61ef499
AE
/* Minimal init sequence for an AMP controller: select block-based flow
 * control, then reset and read the local version. */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
238
/* Request callback for device bring-up: flush any driver-supplied
 * "special" init commands into the command queue first, then run the
 * transport-specific init sequence for the device type. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
272
6ed58ec5
VT
/* LE-specific init request; opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
280
1da177e4
LT
281static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
282{
283 __u8 scan = opt;
284
285 BT_DBG("%s %x", hdev->name, scan);
286
287 /* Inquiry and Page scans */
a9de9248 288 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
289}
290
291static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
292{
293 __u8 auth = opt;
294
295 BT_DBG("%s %x", hdev->name, auth);
296
297 /* Authentication */
a9de9248 298 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
299}
300
301static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
302{
303 __u8 encrypt = opt;
304
305 BT_DBG("%s %x", hdev->name, encrypt);
306
e4e8e37c 307 /* Encryption */
a9de9248 308 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
309}
310
e4e8e37c
MH
311static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
312{
313 __le16 policy = cpu_to_le16(opt);
314
a418b893 315 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
316
317 /* Default link policy */
318 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
319}
320
8e87d142 321/* Get HCI device by index.
1da177e4
LT
322 * Device is held on return. */
323struct hci_dev *hci_dev_get(int index)
324{
8035ded4 325 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
326
327 BT_DBG("%d", index);
328
329 if (index < 0)
330 return NULL;
331
332 read_lock(&hci_dev_list_lock);
8035ded4 333 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
334 if (d->id == index) {
335 hdev = hci_dev_hold(d);
336 break;
337 }
338 }
339 read_unlock(&hci_dev_list_lock);
340 return hdev;
341}
1da177e4
LT
342
343/* ---- Inquiry support ---- */
ff9ef578 344
30dc78e1
JH
345bool hci_discovery_active(struct hci_dev *hdev)
346{
347 struct discovery_state *discov = &hdev->discovery;
348
6fbe195d 349 switch (discov->state) {
343f935b 350 case DISCOVERY_FINDING:
6fbe195d 351 case DISCOVERY_RESOLVING:
30dc78e1
JH
352 return true;
353
6fbe195d
AG
354 default:
355 return false;
356 }
30dc78e1
JH
357}
358
ff9ef578
JH
/* Advance the discovery state machine, notifying mgmt of the
 * discovering/not-discovering transitions userspace cares about. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hdev->discovery.type = 0;

		/* STARTING -> STOPPED means discovery never actually began,
		 * so no "discovering stopped" event is emitted. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
386
1da177e4
LT
/* Drop every cached inquiry entry and reset the discovery lists/state.
 * NOTE(review): callers appear to hold hci_dev_lock - confirm. */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	/* Entries on unknown/resolve were freed via the "all" list above;
	 * just reinitialize the list heads. */
	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
	cache->state = DISCOVERY_STOPPED;
}
401
402struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
403{
30883512 404 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
405 struct inquiry_entry *e;
406
407 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
408
561aafbc
JH
409 list_for_each_entry(e, &cache->all, all) {
410 if (!bacmp(&e->data.bdaddr, bdaddr))
411 return e;
412 }
413
414 return NULL;
415}
416
417struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
418 bdaddr_t *bdaddr)
419{
30883512 420 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
425 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 426 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
427 return e;
428 }
429
430 return NULL;
1da177e4
LT
431}
432
30dc78e1
JH
433struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
434 bdaddr_t *bdaddr,
435 int state)
436{
437 struct discovery_state *cache = &hdev->discovery;
438 struct inquiry_entry *e;
439
440 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
441
442 list_for_each_entry(e, &cache->resolve, list) {
443 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
444 return e;
445 if (!bacmp(&e->data.bdaddr, bdaddr))
446 return e;
447 }
448
449 return NULL;
450}
451
a3d4e20a
JH
/* Re-insert ie into the resolve list so the list stays ordered by
 * descending RSSI magnitude, with NAME_PENDING entries kept in place
 * at the front. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until we find the first non-pending entry with weaker RSSI;
	 * insert after the last entry we passed. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
470
/* Insert or refresh an inquiry result in the discovery cache.
 *
 * @name_known: whether the remote name is already known for this result.
 * Returns true when the entry's name is known (i.e. no name resolution
 * is needed), false otherwise or on allocation failure. */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* RSSI changed for an entry awaiting name resolution:
		 * re-sort it within the resolve list. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN (and drop from unknown/resolve) unless a
	 * resolution request is already pending. */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
520
521static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
522{
30883512 523 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
524 struct inquiry_info *info = (struct inquiry_info *) buf;
525 struct inquiry_entry *e;
526 int copied = 0;
527
561aafbc 528 list_for_each_entry(e, &cache->all, all) {
1da177e4 529 struct inquiry_data *data = &e->data;
b57c1a56
JH
530
531 if (copied >= num)
532 break;
533
1da177e4
LT
534 bacpy(&info->bdaddr, &data->bdaddr);
535 info->pscan_rep_mode = data->pscan_rep_mode;
536 info->pscan_period_mode = data->pscan_period_mode;
537 info->pscan_mode = data->pscan_mode;
538 memcpy(info->dev_class, data->dev_class, 3);
539 info->clock_offset = data->clock_offset;
b57c1a56 540
1da177e4 541 info++;
b57c1a56 542 copied++;
1da177e4
LT
543 }
544
545 BT_DBG("cache %p, copied %d", cache, copied);
546 return copied;
547}
548
549static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
550{
551 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
552 struct hci_cp_inquiry cp;
553
554 BT_DBG("%s", hdev->name);
555
556 if (test_bit(HCI_INQUIRY, &hdev->flags))
557 return;
558
559 /* Start Inquiry */
560 memcpy(&cp.lap, &ir->lap, 3);
561 cp.length = ir->length;
562 cp.num_rsp = ir->num_rsp;
a9de9248 563 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
564}
565
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (when the
 * cache is stale, empty, or a flush was requested), then copy the cached
 * results back to userspace after the request header. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* 2000 ms per ir.length unit; presumably a ceiling for the spec's
	 * 1.28 s inquiry-length unit - TODO confirm */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the result array */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
631
632/* ---- HCI ioctl helpers ---- */
633
/* Bring an HCI device up: open the transport, run the init request
 * sequence (unless the device is raw), and announce HCI_DEV_UP.  On init
 * failure the work items and queues are drained and the transport is
 * closed again.  Returns 0 or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
720
/* Take an HCI device down: cancel pending work, flush queues, reset the
 * controller (when allowed) and close the transport.  The ordering below
 * (cancel work -> flush queues -> reset -> close) is intentional; do not
 * reorder. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Devices with the NO_RESET quirk were not reset during init, so
	 * reset them on the way down instead. */
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
805
806int hci_dev_close(__u16 dev)
807{
808 struct hci_dev *hdev;
809 int err;
810
70f23020
AE
811 hdev = hci_dev_get(dev);
812 if (!hdev)
1da177e4
LT
813 return -ENODEV;
814 err = hci_dev_do_close(hdev);
815 hci_dev_put(hdev);
816 return err;
817}
818
/* HCIDEVRESET ioctl: drop queued traffic, flush caches and connections,
 * zero the flow-control counters and (for non-raw devices) send an HCI
 * Reset.  A no-op when the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command flow control and clear per-link credit counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
857
858int hci_dev_reset_stat(__u16 dev)
859{
860 struct hci_dev *hdev;
861 int ret = 0;
862
70f23020
AE
863 hdev = hci_dev_get(dev);
864 if (!hdev)
1da177e4
LT
865 return -ENODEV;
866
867 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
868
869 hci_dev_put(hdev);
870
871 return ret;
872}
873
/* Dispatcher for the HCISET* device-configuration ioctls.  Commands that
 * touch the controller go through hci_request(); the rest just update
 * fields on hdev. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values: the low half
	 * is the packet count, the high half the MTU. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
948
/* HCIGETDEVLIST ioctl: copy up to dev_num {id, flags} pairs for the
 * registered devices to userspace.  Also cancels a pending auto-power-off
 * and marks non-mgmt devices pairable, mirroring hci_get_dev_info(). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation; also rejects a zero-length request */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
995
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * device and copy it to userspace. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: abort a pending auto-power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1037
1038/* ---- Interface to HCI drivers ---- */
1039
611b30f7
MH
1040static int hci_rfkill_set_block(void *data, bool blocked)
1041{
1042 struct hci_dev *hdev = data;
1043
1044 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1045
1046 if (!blocked)
1047 return 0;
1048
1049 hci_dev_do_close(hdev);
1050
1051 return 0;
1052}
1053
1054static const struct rfkill_ops hci_rfkill_ops = {
1055 .set_block = hci_rfkill_set_block,
1056};
1057
1da177e4
LT
1058/* Alloc HCI device */
1059struct hci_dev *hci_alloc_dev(void)
1060{
1061 struct hci_dev *hdev;
1062
25ea6db0 1063 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1064 if (!hdev)
1065 return NULL;
1066
0ac7e700 1067 hci_init_sysfs(hdev);
1da177e4
LT
1068 skb_queue_head_init(&hdev->driver_init);
1069
1070 return hdev;
1071}
1072EXPORT_SYMBOL(hci_alloc_dev);
1073
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1083
ab81cbf9
JH
/* Deferred power-on work: open the device, arm the auto-off timer when
 * HCI_AUTO_OFF is set, and finish setup by notifying mgmt. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1100
/* Deferred power-off work, scheduled by hci_power_on() when the device
 * came up with HCI_AUTO_OFF set and nobody claimed it in time. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
1112
16ab91ab
JH
/* Delayed work: end limited-discoverable mode by writing SCAN_PAGE only
 * (inquiry scan off) and clearing the discoverable timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
2aeb9a1a
JH
1131int hci_uuids_clear(struct hci_dev *hdev)
1132{
1133 struct list_head *p, *n;
1134
1135 list_for_each_safe(p, n, &hdev->uuids) {
1136 struct bt_uuid *uuid;
1137
1138 uuid = list_entry(p, struct bt_uuid, list);
1139
1140 list_del(p);
1141 kfree(uuid);
1142 }
1143
1144 return 0;
1145}
1146
55ed8ca1
JH
1147int hci_link_keys_clear(struct hci_dev *hdev)
1148{
1149 struct list_head *p, *n;
1150
1151 list_for_each_safe(p, n, &hdev->link_keys) {
1152 struct link_key *key;
1153
1154 key = list_entry(p, struct link_key, list);
1155
1156 list_del(p);
1157 kfree(key);
1158 }
1159
1160 return 0;
1161}
1162
b899efaf
VCG
1163int hci_smp_ltks_clear(struct hci_dev *hdev)
1164{
1165 struct smp_ltk *k, *tmp;
1166
1167 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1168 list_del(&k->list);
1169 kfree(k);
1170 }
1171
1172 return 0;
1173}
1174
55ed8ca1
JH
1175struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1176{
8035ded4 1177 struct link_key *k;
55ed8ca1 1178
8035ded4 1179 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1180 if (bacmp(bdaddr, &k->bdaddr) == 0)
1181 return k;
55ed8ca1
JH
1182
1183 return NULL;
1184}
1185
d25e28ab
JH
1186static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1187 u8 key_type, u8 old_key_type)
1188{
1189 /* Legacy key */
1190 if (key_type < 0x03)
1191 return 1;
1192
1193 /* Debug keys are insecure so don't store them persistently */
1194 if (key_type == HCI_LK_DEBUG_COMBINATION)
1195 return 0;
1196
1197 /* Changed combination key and there's no previous one */
1198 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1199 return 0;
1200
1201 /* Security mode 3 case */
1202 if (!conn)
1203 return 1;
1204
1205 /* Neither local nor remote side had no-bonding as requirement */
1206 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1207 return 1;
1208
1209 /* Local side had dedicated bonding as requirement */
1210 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1211 return 1;
1212
1213 /* Remote side had dedicated bonding as requirement */
1214 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1215 return 1;
1216
1217 /* If none of the above criteria match, then don't store the key
1218 * persistently */
1219 return 0;
1220}
1221
c9839a11 1222struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1223{
c9839a11 1224 struct smp_ltk *k;
75d262c2 1225
c9839a11
VCG
1226 list_for_each_entry(k, &hdev->long_term_keys, list) {
1227 if (k->ediv != ediv ||
1228 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1229 continue;
1230
c9839a11 1231 return k;
75d262c2
VCG
1232 }
1233
1234 return NULL;
1235}
1236EXPORT_SYMBOL(hci_find_ltk);
1237
c9839a11
VCG
1238struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1239 u8 addr_type)
75d262c2 1240{
c9839a11 1241 struct smp_ltk *k;
75d262c2 1242
c9839a11
VCG
1243 list_for_each_entry(k, &hdev->long_term_keys, list)
1244 if (addr_type == k->bdaddr_type &&
1245 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1246 return k;
1247
1248 return NULL;
1249}
c9839a11 1250EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1251
d25e28ab
JH
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * Reuses an existing entry when one is present, otherwise allocates a
 * new one. When @new_key is set the management interface is notified
 * and, if hci_persistent_key() decides the key should not be kept, the
 * entry is removed again after the notification.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" when no conn is available */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are reported but not kept in the list */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1306
c9839a11
VCG
/* Store (or update) an SMP key of the given @type for @bdaddr.
 *
 * Only STK and LTK types are accepted; anything else is silently
 * ignored (returns 0). When @new_key is set and the key is an LTK,
 * the management interface is notified.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse any existing entry for this address/type pair */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys (not STKs) are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1343
55ed8ca1
JH
1344int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1345{
1346 struct link_key *key;
1347
1348 key = hci_find_link_key(hdev, bdaddr);
1349 if (!key)
1350 return -ENOENT;
1351
1352 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1353
1354 list_del(&key->list);
1355 kfree(key);
1356
1357 return 0;
1358}
1359
b899efaf
VCG
1360int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1361{
1362 struct smp_ltk *k, *tmp;
1363
1364 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1365 if (bacmp(bdaddr, &k->bdaddr))
1366 continue;
1367
1368 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1369
1370 list_del(&k->list);
1371 kfree(k);
1372 }
1373
1374 return 0;
1375}
1376
6bd32326
VT
/* HCI command timer function: fires when the controller has not
 * acknowledged a command in time. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* Pretend a command credit is available so the queue moves on */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1386
2763eda6
SJ
1387struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1388 bdaddr_t *bdaddr)
1389{
1390 struct oob_data *data;
1391
1392 list_for_each_entry(data, &hdev->remote_oob_data, list)
1393 if (bacmp(bdaddr, &data->bdaddr) == 0)
1394 return data;
1395
1396 return NULL;
1397}
1398
1399int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1400{
1401 struct oob_data *data;
1402
1403 data = hci_find_remote_oob_data(hdev, bdaddr);
1404 if (!data)
1405 return -ENOENT;
1406
1407 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1408
1409 list_del(&data->list);
1410 kfree(data);
1411
1412 return 0;
1413}
1414
1415int hci_remote_oob_data_clear(struct hci_dev *hdev)
1416{
1417 struct oob_data *data, *n;
1418
1419 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1420 list_del(&data->list);
1421 kfree(data);
1422 }
1423
1424 return 0;
1425}
1426
/* Store (or refresh) remote OOB pairing data for @bdaddr.
 *
 * Reuses an existing entry when present; otherwise a new one is
 * allocated and linked in. @hash and @randomizer are copied into the
 * entry. Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* No previous entry: every field is assigned below, so a
		 * plain kmalloc is sufficient */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1450
b2a66aad
AJ
1451struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1452 bdaddr_t *bdaddr)
1453{
8035ded4 1454 struct bdaddr_list *b;
b2a66aad 1455
8035ded4 1456 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1457 if (bacmp(bdaddr, &b->bdaddr) == 0)
1458 return b;
b2a66aad
AJ
1459
1460 return NULL;
1461}
1462
1463int hci_blacklist_clear(struct hci_dev *hdev)
1464{
1465 struct list_head *p, *n;
1466
1467 list_for_each_safe(p, n, &hdev->blacklist) {
1468 struct bdaddr_list *b;
1469
1470 b = list_entry(p, struct bdaddr_list, list);
1471
1472 list_del(p);
1473 kfree(b);
1474 }
1475
1476 return 0;
1477}
1478
/* Add @bdaddr to the device blacklist and notify mgmt.
 *
 * Returns -EBADF for the wildcard address, -EEXIST if the address is
 * already blacklisted, -ENOMEM on allocation failure, otherwise the
 * result of mgmt_device_blocked().
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY cannot be blacklisted */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1499
/* Remove @bdaddr from the device blacklist and notify mgmt.
 *
 * Passing BDADDR_ANY clears the whole blacklist. Returns -ENOENT when
 * the address was not listed, otherwise the result of
 * mgmt_device_unblocked().
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* Wildcard address empties the entire list */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1516
/* Delayed-work handler (hdev->adv_work) that drops the cached LE
 * advertising entries under the device lock. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1528
76c8686f
AG
1529int hci_adv_entries_clear(struct hci_dev *hdev)
1530{
1531 struct adv_entry *entry, *tmp;
1532
1533 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1534 list_del(&entry->list);
1535 kfree(entry);
1536 }
1537
1538 BT_DBG("%s adv cache cleared", hdev->name);
1539
1540 return 0;
1541}
1542
1543struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1544{
1545 struct adv_entry *entry;
1546
1547 list_for_each_entry(entry, &hdev->adv_entries, list)
1548 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1549 return entry;
1550
1551 return NULL;
1552}
1553
1554static inline int is_connectable_adv(u8 evt_type)
1555{
1556 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1557 return 1;
1558
1559 return 0;
1560}
1561
/* Cache the sender of a connectable LE advertising report.
 *
 * Non-connectable event types are rejected with -EINVAL; duplicates
 * are ignored (returns 0). Returns -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1589
7ba8b4be
AG
/* __hci_request callback: send LE Set Scan Parameters using the
 * le_scan_params passed through @opt. */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1602
/* __hci_request callback: send LE Set Scan Enable with scanning on
 * (duplicate filtering left at 0). */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1612
/* Start an LE scan synchronously: set parameters, enable scanning and
 * arm the delayed work that will stop it after @timeout ms.
 *
 * Returns -EINPROGRESS if a scan is already running, otherwise the
 * result of the two __hci_request calls (0 on success).
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
							u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be set before scanning is enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
									timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Auto-disable after the requested duration */
	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1646
/* Delayed-work handler that stops an LE scan: sends Set Scan Enable
 * with an all-zero (disable) parameter block. */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1659
28b75a89
AG
/* Work handler that runs the blocking LE scan with the parameters
 * stashed in hdev->le_scan_params by hci_le_scan(). */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
					param->window, param->timeout);
}
1670
/* Kick off an asynchronous LE scan.
 *
 * Stores the parameters on the device and queues le_scan_work on the
 * long-running system workqueue (the actual request blocks).
 * Returns -EINPROGRESS when a scan work item is already pending or
 * running, 0 otherwise.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
								int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1690
1da177e4
LT
/* Register HCI device
 *
 * Allocates the next free index (BR/EDR may take index 0, AMP starts
 * at 1), initializes all per-device state, queues, work items and
 * timers, creates the per-device workqueue and sysfs entries, sets up
 * optional rfkill, and schedules the deferred power-on.
 * Returns the assigned id on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must provide at least open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller settings */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/TX/command processing work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	/* Per-device bookkeeping lists */
	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	/* Deferred power management and advertising-cache work */
	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill support is optional: registration failure is tolerated */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Start in setup/auto-off state and power up asynchronously */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
1821EXPORT_SYMBOL(hci_register_dev);
1822
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): removes the device from the global
 * list, closes it, tells mgmt (unless still in init/setup), tears
 * down rfkill/sysfs/workqueue and frees all stored per-device data
 * before dropping the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all stored keys, OOB data and cached entries */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
1874EXPORT_SYMBOL(hci_unregister_dev);
1875
/* Suspend HCI device: notify HCI listeners; always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
1882EXPORT_SYMBOL(hci_suspend_dev);
1883
/* Resume HCI device: notify HCI listeners; always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
1890EXPORT_SYMBOL(hci_resume_dev);
1891
76bca880
MH
/* Receive frame from HCI drivers
 *
 * Entry point for driver-delivered packets. The skb must carry its
 * hci_dev in skb->dev. Frames arriving while the device is neither up
 * nor initializing are dropped with -ENXIO; otherwise the skb is
 * timestamped, marked incoming and handed to the RX work item.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
1913EXPORT_SYMBOL(hci_recv_frame);
1914
/* Incrementally reassemble one HCI packet of @type from a raw byte
 * stream.
 *
 * Per-slot state lives in hdev->reassembly[index]; scb->expect tracks
 * how many bytes are still needed. Once a header is complete, the
 * payload length is read from it and the expectation extended. When a
 * frame completes it is handed to hci_recv_frame().
 * Returns the number of unconsumed input bytes, or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate the largest possible
		 * buffer for this packet type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is in, learn the payload length */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2023
ef222013
MH
/* Feed @count bytes of a fragmented HCI packet of @type into the
 * reassembly machinery, consuming as much input as possible.
 * Each packet type uses its own reassembly slot (type - 1).
 * Returns the number of bytes left unconsumed, or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
2042EXPORT_SYMBOL(hci_recv_fragment);
2043
99811510
SS
2044#define STREAM_REASSEMBLY 0
2045
/* Feed raw stream data (e.g. from a UART transport) into reassembly.
 *
 * At the start of each frame the first byte of the stream is the HCI
 * packet-type indicator; mid-frame the type is taken from the skb in
 * the shared STREAM_REASSEMBLY slot.
 * Returns the number of bytes left unconsumed, or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
2077EXPORT_SYMBOL(hci_recv_stream_fragment);
2078
1da177e4
LT
2079/* ---- Interface to upper protocols ---- */
2080
1da177e4
LT
/* Register an upper-protocol callback structure on the global
 * hci_cb_list. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
2091EXPORT_SYMBOL(hci_register_cb);
2092
/* Remove a previously registered upper-protocol callback structure.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
2103EXPORT_SYMBOL(hci_unregister_cb);
2104
/* Hand one outgoing frame to the driver.
 *
 * The skb must carry its hci_dev in skb->dev. If any socket is in
 * promiscuous mode, a timestamped copy is delivered to the HCI
 * sockets first. Returns the driver's send() result or -ENODEV.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2128
/* Send HCI command
 *
 * Builds a command packet (header + @plen bytes of @param) and queues
 * it on cmd_q for the command work item to transmit.
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2164
/* Get data from the previously sent command
 *
 * Returns a pointer to the parameter bytes of the last transmitted
 * command if its opcode matches @opcode, otherwise NULL.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	/* Parameters follow directly after the command header */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2182
/* Send ACL data */
/* Prepend an ACL header carrying the packed handle/flags and the
 * current payload length to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2195
73d80deb
LAD
/* Queue an ACL skb (and any frag_list fragments) on @queue.
 *
 * A non-fragmented skb is queued as-is. For a fragmented skb, the
 * first fragment keeps the caller's flags while continuation
 * fragments are re-marked ACL_CONT and get their own ACL headers;
 * all fragments are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2236
/* Send ACL data on @chan: add the ACL header, queue the skb on the
 * channel's data queue and kick the TX work item. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2252EXPORT_SYMBOL(hci_send_acl);
2253
/* Send SCO data */
/* Prepend the SCO header, queue the skb on the connection's data
 * queue and kick the TX work item. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2275EXPORT_SYMBOL(hci_send_sco);
2276
2277/* ---- HCI TX task (outgoing data) ---- */
2278
2279/* HCI Connection scheduler */
2280static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2281{
2282 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2283 struct hci_conn *conn = NULL, *c;
1da177e4 2284 int num = 0, min = ~0;
1da177e4 2285
8e87d142 2286 /* We don't have to lock device here. Connections are always
1da177e4 2287 * added and removed with TX task disabled. */
bf4c6325
GP
2288
2289 rcu_read_lock();
2290
2291 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2292 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2293 continue;
769be974
MH
2294
2295 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2296 continue;
2297
1da177e4
LT
2298 num++;
2299
2300 if (c->sent < min) {
2301 min = c->sent;
2302 conn = c;
2303 }
52087a79
LAD
2304
2305 if (hci_conn_num(hdev, type) == num)
2306 break;
1da177e4
LT
2307 }
2308
bf4c6325
GP
2309 rcu_read_unlock();
2310
1da177e4 2311 if (conn) {
6ed58ec5
VT
2312 int cnt, q;
2313
2314 switch (conn->type) {
2315 case ACL_LINK:
2316 cnt = hdev->acl_cnt;
2317 break;
2318 case SCO_LINK:
2319 case ESCO_LINK:
2320 cnt = hdev->sco_cnt;
2321 break;
2322 case LE_LINK:
2323 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2324 break;
2325 default:
2326 cnt = 0;
2327 BT_ERR("Unknown link type");
2328 }
2329
2330 q = cnt / num;
1da177e4
LT
2331 *quote = q ? q : 1;
2332 } else
2333 *quote = 0;
2334
2335 BT_DBG("conn %p quote %d", conn, *quote);
2336 return conn;
2337}
2338
bae1f5d9 2339static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2340{
2341 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2342 struct hci_conn *c;
1da177e4 2343
bae1f5d9 2344 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2345
bf4c6325
GP
2346 rcu_read_lock();
2347
1da177e4 2348 /* Kill stalled connections */
bf4c6325 2349 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2350 if (c->type == type && c->sent) {
2351 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2352 hdev->name, batostr(&c->dst));
2353 hci_acl_disconn(c, 0x13);
2354 }
2355 }
bf4c6325
GP
2356
2357 rcu_read_unlock();
1da177e4
LT
2358}
2359
73d80deb
LAD
/* HCI channel scheduler: scan every channel on connections of @type and
 * select one whose head packet has the highest priority seen, breaking
 * ties in favor of the connection with the fewest outstanding packets.
 * On success *quote holds the channel's fair share of free controller
 * slots; returns NULL when no eligible channel has queued data. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Only channels at the highest priority seen so
			 * far compete for selection. */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found - restart the
				 * fairness bookkeeping at this level. */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Prefer the channel whose *connection* has the
			 * fewest unacked packets. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited - stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free-slot counter for this link type; LE shares the ACL pool
	 * when the controller has no dedicated LE buffers. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the free slots, at least one packet. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2438
02b20f0b
LAD
/* Priority recalculation: after a scheduling round, promote the head
 * packet of every starved channel (chan->sent == 0, i.e. it sent
 * nothing this round) to HCI_PRIO_MAX - 1 so it wins the next round.
 * Channels that did send only get their per-round counter reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round - clear its
			 * counter and leave priorities untouched. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion level. */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type visited - stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2488
b71d385a
AE
/* Calculate count of blocks used by this packet: payload length (ACL
 * header excluded) rounded up to whole controller data blocks. */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2494
63d2bc1b 2495static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2496{
1da177e4
LT
2497 if (!test_bit(HCI_RAW, &hdev->flags)) {
2498 /* ACL tx timeout must be longer than maximum
2499 * link supervision timeout (40.9 seconds) */
63d2bc1b 2500 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
cc48dc0a 2501 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2502 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2503 }
63d2bc1b 2504}
1da177e4 2505
63d2bc1b
AE
/* Packet-based ACL scheduler: repeatedly pick the best channel via
 * hci_chan_sent() and transmit up to its quota, stopping early when the
 * head of the queue drops below the priority the quota was granted at.
 * Runs a priority recalculation pass if anything was sent. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority level the quota was granted for. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent - give starved channels a priority boost. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2543
b71d385a
AE
/* Block-based ACL scheduler: mirrors hci_sched_acl_pkt() but accounts
 * flow control in controller data blocks rather than whole packets. */
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority level the quota was granted for. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Packet needs more blocks than remain free.
			 * NOTE(review): the skb was already dequeued and
			 * is neither sent, requeued nor freed on this
			 * path - looks like a packet leak; confirm. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent - give starved channels a priority boost. */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2589
2590static inline void hci_sched_acl(struct hci_dev *hdev)
2591{
2592 BT_DBG("%s", hdev->name);
2593
2594 if (!hci_conn_num(hdev, ACL_LINK))
2595 return;
2596
2597 switch (hdev->flow_ctl_mode) {
2598 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2599 hci_sched_acl_pkt(hdev);
2600 break;
2601
2602 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2603 hci_sched_acl_blk(hdev);
2604 break;
2605 }
2606}
2607
1da177e4
LT
2608/* Schedule SCO */
2609static inline void hci_sched_sco(struct hci_dev *hdev)
2610{
2611 struct hci_conn *conn;
2612 struct sk_buff *skb;
2613 int quote;
2614
2615 BT_DBG("%s", hdev->name);
2616
52087a79
LAD
2617 if (!hci_conn_num(hdev, SCO_LINK))
2618 return;
2619
1da177e4
LT
2620 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2621 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2622 BT_DBG("skb %p len %d", skb, skb->len);
2623 hci_send_frame(skb);
2624
2625 conn->sent++;
2626 if (conn->sent == ~0)
2627 conn->sent = 0;
2628 }
2629 }
2630}
2631
b6a0dc82
MH
2632static inline void hci_sched_esco(struct hci_dev *hdev)
2633{
2634 struct hci_conn *conn;
2635 struct sk_buff *skb;
2636 int quote;
2637
2638 BT_DBG("%s", hdev->name);
2639
52087a79
LAD
2640 if (!hci_conn_num(hdev, ESCO_LINK))
2641 return;
2642
b6a0dc82
MH
2643 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2644 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2645 BT_DBG("skb %p len %d", skb, skb->len);
2646 hci_send_frame(skb);
2647
2648 conn->sent++;
2649 if (conn->sent == ~0)
2650 conn->sent = 0;
2651 }
2652 }
2653}
2654
6ed58ec5
VT
/* LE scheduler: mirrors hci_sched_acl_pkt() but draws from the LE
 * buffer pool (or the shared ACL pool when the controller reports no
 * dedicated LE buffers) and has its own stall timeout. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* le_pkts == 0 means no dedicated LE buffers: share ACL's. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority level the quota was granted for. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent - give starved channels a priority boost. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2705
/* HCI TX work: run the per-link-type schedulers, then flush any raw
 * (unknown type) packets queued on hdev->raw_q. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2728
25985edc 2729/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2730
2731/* ACL data packet */
2732static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2733{
2734 struct hci_acl_hdr *hdr = (void *) skb->data;
2735 struct hci_conn *conn;
2736 __u16 handle, flags;
2737
2738 skb_pull(skb, HCI_ACL_HDR_SIZE);
2739
2740 handle = __le16_to_cpu(hdr->handle);
2741 flags = hci_flags(handle);
2742 handle = hci_handle(handle);
2743
2744 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2745
2746 hdev->stat.acl_rx++;
2747
2748 hci_dev_lock(hdev);
2749 conn = hci_conn_hash_lookup_handle(hdev, handle);
2750 hci_dev_unlock(hdev);
8e87d142 2751
1da177e4 2752 if (conn) {
65983fc7 2753 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2754
1da177e4 2755 /* Send to upper protocol */
686ebf28
UF
2756 l2cap_recv_acldata(conn, skb, flags);
2757 return;
1da177e4 2758 } else {
8e87d142 2759 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2760 hdev->name, handle);
2761 }
2762
2763 kfree_skb(skb);
2764}
2765
2766/* SCO data packet */
2767static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2768{
2769 struct hci_sco_hdr *hdr = (void *) skb->data;
2770 struct hci_conn *conn;
2771 __u16 handle;
2772
2773 skb_pull(skb, HCI_SCO_HDR_SIZE);
2774
2775 handle = __le16_to_cpu(hdr->handle);
2776
2777 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2778
2779 hdev->stat.sco_rx++;
2780
2781 hci_dev_lock(hdev);
2782 conn = hci_conn_hash_lookup_handle(hdev, handle);
2783 hci_dev_unlock(hdev);
2784
2785 if (conn) {
1da177e4 2786 /* Send to upper protocol */
686ebf28
UF
2787 sco_recv_scodata(conn, skb);
2788 return;
1da177e4 2789 } else {
8e87d142 2790 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2791 hdev->name, handle);
2792 }
2793
2794 kfree_skb(skb);
2795}
2796
/* HCI RX work: drain hdev->rx_q, mirroring traffic to HCI sockets when
 * in promiscuous mode, then dispatch each frame by packet type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw-mode device: the stack does no processing. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type - discard. */
			kfree_skb(skb);
			break;
		}
	}
}
2848
/* HCI command work: when the controller has command credits, send the
 * next queued command; a clone is kept in hdev->sent_cmd so the
 * completion path can match the reply against what was sent. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command's clone. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While HCI_RESET is set the timer is stopped
			 * rather than re-armed - presumably the reset
			 * path manages its own timeout; confirm. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed - requeue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2879
2880int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2881{
2882 /* General inquiry access code (GIAC) */
2883 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2884 struct hci_cp_inquiry cp;
2885
2886 BT_DBG("%s", hdev->name);
2887
2888 if (test_bit(HCI_INQUIRY, &hdev->flags))
2889 return -EINPROGRESS;
2890
4663262c
JH
2891 inquiry_cache_flush(hdev);
2892
2519a1fc
AG
2893 memset(&cp, 0, sizeof(cp));
2894 memcpy(&cp.lap, lap, sizeof(cp.lap));
2895 cp.length = length;
2896
2897 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2898}
023d5049
AG
2899
/* Abort an in-progress inquiry; returns -EPERM when none is running,
 * otherwise the result of queueing the Inquiry Cancel command. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
7784d78f
AE
2909
/* Module parameter: toggles High Speed support at load time or via
 * sysfs (mode 0644); enable_hs itself is defined elsewhere. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");