/* net/bluetooth/hci_core.c — Bluetooth HCI core (from the mt8127
 * android_kernel_alcatel_ttab tree). Web-view blame annotations have
 * been stripped; original commit: "Bluetooth: Fix coding style in all
 * .h files".
 */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4 42#include <linux/interrupt.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay before an auto-powered-on controller is powered back off (ms) */
#define AUTO_OFF_TIMEOUT	2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
/* ---- HCI notifications ---- */

/* Broadcast a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76/* ---- HCI requests ---- */
77
23bb5763 78void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 79{
23bb5763
JH
80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
a5040efa
JH
82 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
75fb0e32
JH
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
86 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
96 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
23bb5763 105 return;
75fb0e32 106 }
1da177e4
LT
107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
8e87d142 127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 128 unsigned long opt, __u32 timeout)
1da177e4
LT
129{
130 DECLARE_WAITQUEUE(wait, current);
131 int err = 0;
132
133 BT_DBG("%s start", hdev->name);
134
135 hdev->req_status = HCI_REQ_PEND;
136
137 add_wait_queue(&hdev->req_wait_q, &wait);
138 set_current_state(TASK_INTERRUPTIBLE);
139
140 req(hdev, opt);
141 schedule_timeout(timeout);
142
143 remove_wait_queue(&hdev->req_wait_q, &wait);
144
145 if (signal_pending(current))
146 return -EINTR;
147
148 switch (hdev->req_status) {
149 case HCI_REQ_DONE:
e175072f 150 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
151 break;
152
153 case HCI_REQ_CANCELED:
154 err = -hdev->req_result;
155 break;
156
157 default:
158 err = -ETIMEDOUT;
159 break;
3ff50b79 160 }
1da177e4 161
a5040efa 162 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
163
164 BT_DBG("%s end: err %d", hdev->name, err);
165
166 return err;
167}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 170 unsigned long opt, __u32 timeout)
1da177e4
LT
171{
172 int ret;
173
7c6a329e
MH
174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
1da177e4
LT
177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
f630cf0d 190 set_bit(HCI_RESET, &hdev->flags);
a9de9248 191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
192}
193
e61ef499 194static void bredr_init(struct hci_dev *hdev)
1da177e4 195{
b0916ea0 196 struct hci_cp_delete_stored_link_key cp;
1ebb9252 197 __le16 param;
89f2783d 198 __u8 flt_type;
1da177e4 199
2455a3ea
AE
200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
1da177e4
LT
202 /* Mandatory initialization */
203
204 /* Reset */
f630cf0d 205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 208 }
1da177e4
LT
209
210 /* Read Local Supported Features */
a9de9248 211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 212
1143e5a6 213 /* Read Local Version */
a9de9248 214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 215
1da177e4 216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 218
1da177e4 219 /* Read BD Address */
a9de9248
MH
220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
227
228 /* Read Voice Setting */
a9de9248 229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
230
231 /* Optional initialization */
232
233 /* Clear Event Filters */
89f2783d 234 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 236
1da177e4 237 /* Connection accept timeout ~20 secs */
aca3192c 238 param = cpu_to_le16(0x7d00);
a9de9248 239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
240
241 bacpy(&cp.bdaddr, BDADDR_ANY);
242 cp.delete_all = 1;
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
244}
245
e61ef499
AE
246static void amp_init(struct hci_dev *hdev)
247{
2455a3ea
AE
248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
e61ef499
AE
250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255}
256
257static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
258{
259 struct sk_buff *skb;
260
261 BT_DBG("%s %ld", hdev->name, opt);
262
263 /* Driver initialization */
264
265 /* Special commands */
266 while ((skb = skb_dequeue(&hdev->driver_init))) {
267 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
268 skb->dev = (void *) hdev;
269
270 skb_queue_tail(&hdev->cmd_q, skb);
271 queue_work(hdev->workqueue, &hdev->cmd_work);
272 }
273 skb_queue_purge(&hdev->driver_init);
274
275 switch (hdev->dev_type) {
276 case HCI_BREDR:
277 bredr_init(hdev);
278 break;
279
280 case HCI_AMP:
281 amp_init(hdev);
282 break;
283
284 default:
285 BT_ERR("Unknown device type %d", hdev->dev_type);
286 break;
287 }
288
289}
290
6ed58ec5
VT
291static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292{
293 BT_DBG("%s", hdev->name);
294
295 /* Read LE buffer size */
296 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
297}
298
1da177e4
LT
299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
a9de9248 306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
a9de9248 316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
e4e8e37c 325 /* Encryption */
a9de9248 326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
327}
328
e4e8e37c
MH
329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
a418b893 333 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
8e87d142 339/* Get HCI device by index.
1da177e4
LT
340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
8035ded4 343 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
8035ded4 351 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
1da177e4
LT
360
361/* ---- Inquiry support ---- */
ff9ef578 362
30dc78e1
JH
363bool hci_discovery_active(struct hci_dev *hdev)
364{
365 struct discovery_state *discov = &hdev->discovery;
366
6fbe195d 367 switch (discov->state) {
343f935b 368 case DISCOVERY_FINDING:
6fbe195d 369 case DISCOVERY_RESOLVING:
30dc78e1
JH
370 return true;
371
6fbe195d
AG
372 default:
373 return false;
374 }
30dc78e1
JH
375}
376
ff9ef578
JH
377void hci_discovery_set_state(struct hci_dev *hdev, int state)
378{
379 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380
381 if (hdev->discovery.state == state)
382 return;
383
384 switch (state) {
385 case DISCOVERY_STOPPED:
7b99b659
AG
386 if (hdev->discovery.state != DISCOVERY_STARTING)
387 mgmt_discovering(hdev, 0);
f963e8e9 388 hdev->discovery.type = 0;
ff9ef578
JH
389 break;
390 case DISCOVERY_STARTING:
391 break;
343f935b 392 case DISCOVERY_FINDING:
ff9ef578
JH
393 mgmt_discovering(hdev, 1);
394 break;
30dc78e1
JH
395 case DISCOVERY_RESOLVING:
396 break;
ff9ef578
JH
397 case DISCOVERY_STOPPING:
398 break;
399 }
400
401 hdev->discovery.state = state;
402}
403
1da177e4
LT
404static void inquiry_cache_flush(struct hci_dev *hdev)
405{
30883512 406 struct discovery_state *cache = &hdev->discovery;
b57c1a56 407 struct inquiry_entry *p, *n;
1da177e4 408
561aafbc
JH
409 list_for_each_entry_safe(p, n, &cache->all, all) {
410 list_del(&p->all);
b57c1a56 411 kfree(p);
1da177e4 412 }
561aafbc
JH
413
414 INIT_LIST_HEAD(&cache->unknown);
415 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
416}
417
418struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419{
30883512 420 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
561aafbc
JH
425 list_for_each_entry(e, &cache->all, all) {
426 if (!bacmp(&e->data.bdaddr, bdaddr))
427 return e;
428 }
429
430 return NULL;
431}
432
433struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
434 bdaddr_t *bdaddr)
435{
30883512 436 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440
441 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 442 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
443 return e;
444 }
445
446 return NULL;
1da177e4
LT
447}
448
30dc78e1
JH
449struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
450 bdaddr_t *bdaddr,
451 int state)
452{
453 struct discovery_state *cache = &hdev->discovery;
454 struct inquiry_entry *e;
455
456 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457
458 list_for_each_entry(e, &cache->resolve, list) {
459 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
460 return e;
461 if (!bacmp(&e->data.bdaddr, bdaddr))
462 return e;
463 }
464
465 return NULL;
466}
467
a3d4e20a
JH
468void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
469 struct inquiry_entry *ie)
470{
471 struct discovery_state *cache = &hdev->discovery;
472 struct list_head *pos = &cache->resolve;
473 struct inquiry_entry *p;
474
475 list_del(&ie->list);
476
477 list_for_each_entry(p, &cache->resolve, list) {
478 if (p->name_state != NAME_PENDING &&
479 abs(p->data.rssi) >= abs(ie->data.rssi))
480 break;
481 pos = &p->list;
482 }
483
484 list_add(&ie->list, pos);
485}
486
3175405b 487bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
388fc8fa 488 bool name_known, bool *ssp)
1da177e4 489{
30883512 490 struct discovery_state *cache = &hdev->discovery;
70f23020 491 struct inquiry_entry *ie;
1da177e4
LT
492
493 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
494
388fc8fa
JH
495 if (ssp)
496 *ssp = data->ssp_mode;
497
70f23020 498 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 499 if (ie) {
388fc8fa
JH
500 if (ie->data.ssp_mode && ssp)
501 *ssp = true;
502
a3d4e20a
JH
503 if (ie->name_state == NAME_NEEDED &&
504 data->rssi != ie->data.rssi) {
505 ie->data.rssi = data->rssi;
506 hci_inquiry_cache_update_resolve(hdev, ie);
507 }
508
561aafbc 509 goto update;
a3d4e20a 510 }
561aafbc
JH
511
512 /* Entry not in the cache. Add new one. */
513 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
514 if (!ie)
3175405b 515 return false;
561aafbc
JH
516
517 list_add(&ie->all, &cache->all);
518
519 if (name_known) {
520 ie->name_state = NAME_KNOWN;
521 } else {
522 ie->name_state = NAME_NOT_KNOWN;
523 list_add(&ie->list, &cache->unknown);
524 }
70f23020 525
561aafbc
JH
526update:
527 if (name_known && ie->name_state != NAME_KNOWN &&
528 ie->name_state != NAME_PENDING) {
529 ie->name_state = NAME_KNOWN;
530 list_del(&ie->list);
1da177e4
LT
531 }
532
70f23020
AE
533 memcpy(&ie->data, data, sizeof(*data));
534 ie->timestamp = jiffies;
1da177e4 535 cache->timestamp = jiffies;
3175405b
JH
536
537 if (ie->name_state == NAME_NOT_KNOWN)
538 return false;
539
540 return true;
1da177e4
LT
541}
542
543static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
544{
30883512 545 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
546 struct inquiry_info *info = (struct inquiry_info *) buf;
547 struct inquiry_entry *e;
548 int copied = 0;
549
561aafbc 550 list_for_each_entry(e, &cache->all, all) {
1da177e4 551 struct inquiry_data *data = &e->data;
b57c1a56
JH
552
553 if (copied >= num)
554 break;
555
1da177e4
LT
556 bacpy(&info->bdaddr, &data->bdaddr);
557 info->pscan_rep_mode = data->pscan_rep_mode;
558 info->pscan_period_mode = data->pscan_period_mode;
559 info->pscan_mode = data->pscan_mode;
560 memcpy(info->dev_class, data->dev_class, 3);
561 info->clock_offset = data->clock_offset;
b57c1a56 562
1da177e4 563 info++;
b57c1a56 564 copied++;
1da177e4
LT
565 }
566
567 BT_DBG("cache %p, copied %d", cache, copied);
568 return copied;
569}
570
571static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
572{
573 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
574 struct hci_cp_inquiry cp;
575
576 BT_DBG("%s", hdev->name);
577
578 if (test_bit(HCI_INQUIRY, &hdev->flags))
579 return;
580
581 /* Start Inquiry */
582 memcpy(&cp.lap, &ir->lap, 3);
583 cp.length = ir->length;
584 cp.num_rsp = ir->num_rsp;
a9de9248 585 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
586}
587
588int hci_inquiry(void __user *arg)
589{
590 __u8 __user *ptr = arg;
591 struct hci_inquiry_req ir;
592 struct hci_dev *hdev;
593 int err = 0, do_inquiry = 0, max_rsp;
594 long timeo;
595 __u8 *buf;
596
597 if (copy_from_user(&ir, ptr, sizeof(ir)))
598 return -EFAULT;
599
5a08ecce
AE
600 hdev = hci_dev_get(ir.dev_id);
601 if (!hdev)
1da177e4
LT
602 return -ENODEV;
603
09fd0de5 604 hci_dev_lock(hdev);
8e87d142 605 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
606 inquiry_cache_empty(hdev) ||
607 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
608 inquiry_cache_flush(hdev);
609 do_inquiry = 1;
610 }
09fd0de5 611 hci_dev_unlock(hdev);
1da177e4 612
04837f64 613 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
614
615 if (do_inquiry) {
616 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
617 if (err < 0)
618 goto done;
619 }
1da177e4
LT
620
621 /* for unlimited number of responses we will use buffer with 255 entries */
622 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
623
624 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
625 * copy it to the user space.
626 */
01df8c31 627 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 628 if (!buf) {
1da177e4
LT
629 err = -ENOMEM;
630 goto done;
631 }
632
09fd0de5 633 hci_dev_lock(hdev);
1da177e4 634 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 635 hci_dev_unlock(hdev);
1da177e4
LT
636
637 BT_DBG("num_rsp %d", ir.num_rsp);
638
639 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
640 ptr += sizeof(ir);
641 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
642 ir.num_rsp))
643 err = -EFAULT;
8e87d142 644 } else
1da177e4
LT
645 err = -EFAULT;
646
647 kfree(buf);
648
649done:
650 hci_dev_put(hdev);
651 return err;
652}
653
654/* ---- HCI ioctl helpers ---- */
655
656int hci_dev_open(__u16 dev)
657{
658 struct hci_dev *hdev;
659 int ret = 0;
660
5a08ecce
AE
661 hdev = hci_dev_get(dev);
662 if (!hdev)
1da177e4
LT
663 return -ENODEV;
664
665 BT_DBG("%s %p", hdev->name, hdev);
666
667 hci_req_lock(hdev);
668
611b30f7
MH
669 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
670 ret = -ERFKILL;
671 goto done;
672 }
673
1da177e4
LT
674 if (test_bit(HCI_UP, &hdev->flags)) {
675 ret = -EALREADY;
676 goto done;
677 }
678
679 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
680 set_bit(HCI_RAW, &hdev->flags);
681
07e3b94a
AE
682 /* Treat all non BR/EDR controllers as raw devices if
683 enable_hs is not set */
684 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
685 set_bit(HCI_RAW, &hdev->flags);
686
1da177e4
LT
687 if (hdev->open(hdev)) {
688 ret = -EIO;
689 goto done;
690 }
691
692 if (!test_bit(HCI_RAW, &hdev->flags)) {
693 atomic_set(&hdev->cmd_cnt, 1);
694 set_bit(HCI_INIT, &hdev->flags);
a5040efa 695 hdev->init_last_cmd = 0;
1da177e4 696
04837f64
MH
697 ret = __hci_request(hdev, hci_init_req, 0,
698 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 699
eead27da 700 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
701 ret = __hci_request(hdev, hci_le_init_req, 0,
702 msecs_to_jiffies(HCI_INIT_TIMEOUT));
703
1da177e4
LT
704 clear_bit(HCI_INIT, &hdev->flags);
705 }
706
707 if (!ret) {
708 hci_dev_hold(hdev);
709 set_bit(HCI_UP, &hdev->flags);
710 hci_notify(hdev, HCI_DEV_UP);
a8b2d5c2 711 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 712 hci_dev_lock(hdev);
744cf19e 713 mgmt_powered(hdev, 1);
09fd0de5 714 hci_dev_unlock(hdev);
56e5cb86 715 }
8e87d142 716 } else {
1da177e4 717 /* Init failed, cleanup */
3eff45ea 718 flush_work(&hdev->tx_work);
c347b765 719 flush_work(&hdev->cmd_work);
b78752cc 720 flush_work(&hdev->rx_work);
1da177e4
LT
721
722 skb_queue_purge(&hdev->cmd_q);
723 skb_queue_purge(&hdev->rx_q);
724
725 if (hdev->flush)
726 hdev->flush(hdev);
727
728 if (hdev->sent_cmd) {
729 kfree_skb(hdev->sent_cmd);
730 hdev->sent_cmd = NULL;
731 }
732
733 hdev->close(hdev);
734 hdev->flags = 0;
735 }
736
737done:
738 hci_req_unlock(hdev);
739 hci_dev_put(hdev);
740 return ret;
741}
742
743static int hci_dev_do_close(struct hci_dev *hdev)
744{
745 BT_DBG("%s %p", hdev->name, hdev);
746
28b75a89
AG
747 cancel_work_sync(&hdev->le_scan);
748
1da177e4
LT
749 hci_req_cancel(hdev, ENODEV);
750 hci_req_lock(hdev);
751
752 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 753 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
754 hci_req_unlock(hdev);
755 return 0;
756 }
757
3eff45ea
GP
758 /* Flush RX and TX works */
759 flush_work(&hdev->tx_work);
b78752cc 760 flush_work(&hdev->rx_work);
1da177e4 761
16ab91ab 762 if (hdev->discov_timeout > 0) {
e0f9309f 763 cancel_delayed_work(&hdev->discov_off);
16ab91ab 764 hdev->discov_timeout = 0;
5e5282bb 765 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
766 }
767
a8b2d5c2 768 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
769 cancel_delayed_work(&hdev->service_cache);
770
7ba8b4be
AG
771 cancel_delayed_work_sync(&hdev->le_scan_disable);
772
09fd0de5 773 hci_dev_lock(hdev);
1da177e4
LT
774 inquiry_cache_flush(hdev);
775 hci_conn_hash_flush(hdev);
09fd0de5 776 hci_dev_unlock(hdev);
1da177e4
LT
777
778 hci_notify(hdev, HCI_DEV_DOWN);
779
780 if (hdev->flush)
781 hdev->flush(hdev);
782
783 /* Reset device */
784 skb_queue_purge(&hdev->cmd_q);
785 atomic_set(&hdev->cmd_cnt, 1);
8af59467
JH
786 if (!test_bit(HCI_RAW, &hdev->flags) &&
787 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
1da177e4 788 set_bit(HCI_INIT, &hdev->flags);
04837f64 789 __hci_request(hdev, hci_reset_req, 0,
cad44c2b 790 msecs_to_jiffies(250));
1da177e4
LT
791 clear_bit(HCI_INIT, &hdev->flags);
792 }
793
c347b765
GP
794 /* flush cmd work */
795 flush_work(&hdev->cmd_work);
1da177e4
LT
796
797 /* Drop queues */
798 skb_queue_purge(&hdev->rx_q);
799 skb_queue_purge(&hdev->cmd_q);
800 skb_queue_purge(&hdev->raw_q);
801
802 /* Drop last sent command */
803 if (hdev->sent_cmd) {
b79f44c1 804 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
805 kfree_skb(hdev->sent_cmd);
806 hdev->sent_cmd = NULL;
807 }
808
809 /* After this point our queues are empty
810 * and no tasks are scheduled. */
811 hdev->close(hdev);
812
8ee56540
MH
813 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
814 hci_dev_lock(hdev);
815 mgmt_powered(hdev, 0);
816 hci_dev_unlock(hdev);
817 }
5add6af8 818
1da177e4
LT
819 /* Clear flags */
820 hdev->flags = 0;
821
e59fda8d 822 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 823 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 824
1da177e4
LT
825 hci_req_unlock(hdev);
826
827 hci_dev_put(hdev);
828 return 0;
829}
830
831int hci_dev_close(__u16 dev)
832{
833 struct hci_dev *hdev;
834 int err;
835
70f23020
AE
836 hdev = hci_dev_get(dev);
837 if (!hdev)
1da177e4 838 return -ENODEV;
8ee56540
MH
839
840 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
841 cancel_delayed_work(&hdev->power_off);
842
1da177e4 843 err = hci_dev_do_close(hdev);
8ee56540 844
1da177e4
LT
845 hci_dev_put(hdev);
846 return err;
847}
848
849int hci_dev_reset(__u16 dev)
850{
851 struct hci_dev *hdev;
852 int ret = 0;
853
70f23020
AE
854 hdev = hci_dev_get(dev);
855 if (!hdev)
1da177e4
LT
856 return -ENODEV;
857
858 hci_req_lock(hdev);
1da177e4
LT
859
860 if (!test_bit(HCI_UP, &hdev->flags))
861 goto done;
862
863 /* Drop queues */
864 skb_queue_purge(&hdev->rx_q);
865 skb_queue_purge(&hdev->cmd_q);
866
09fd0de5 867 hci_dev_lock(hdev);
1da177e4
LT
868 inquiry_cache_flush(hdev);
869 hci_conn_hash_flush(hdev);
09fd0de5 870 hci_dev_unlock(hdev);
1da177e4
LT
871
872 if (hdev->flush)
873 hdev->flush(hdev);
874
8e87d142 875 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 876 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
877
878 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
879 ret = __hci_request(hdev, hci_reset_req, 0,
880 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
881
882done:
1da177e4
LT
883 hci_req_unlock(hdev);
884 hci_dev_put(hdev);
885 return ret;
886}
887
888int hci_dev_reset_stat(__u16 dev)
889{
890 struct hci_dev *hdev;
891 int ret = 0;
892
70f23020
AE
893 hdev = hci_dev_get(dev);
894 if (!hdev)
1da177e4
LT
895 return -ENODEV;
896
897 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
898
899 hci_dev_put(hdev);
900
901 return ret;
902}
903
904int hci_dev_cmd(unsigned int cmd, void __user *arg)
905{
906 struct hci_dev *hdev;
907 struct hci_dev_req dr;
908 int err = 0;
909
910 if (copy_from_user(&dr, arg, sizeof(dr)))
911 return -EFAULT;
912
70f23020
AE
913 hdev = hci_dev_get(dr.dev_id);
914 if (!hdev)
1da177e4
LT
915 return -ENODEV;
916
917 switch (cmd) {
918 case HCISETAUTH:
04837f64
MH
919 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
920 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
921 break;
922
923 case HCISETENCRYPT:
924 if (!lmp_encrypt_capable(hdev)) {
925 err = -EOPNOTSUPP;
926 break;
927 }
928
929 if (!test_bit(HCI_AUTH, &hdev->flags)) {
930 /* Auth must be enabled first */
04837f64
MH
931 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
932 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
933 if (err)
934 break;
935 }
936
04837f64
MH
937 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
938 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
939 break;
940
941 case HCISETSCAN:
04837f64
MH
942 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
943 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
944 break;
945
1da177e4 946 case HCISETLINKPOL:
e4e8e37c
MH
947 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
948 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
949 break;
950
951 case HCISETLINKMODE:
e4e8e37c
MH
952 hdev->link_mode = ((__u16) dr.dev_opt) &
953 (HCI_LM_MASTER | HCI_LM_ACCEPT);
954 break;
955
956 case HCISETPTYPE:
957 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
958 break;
959
960 case HCISETACLMTU:
e4e8e37c
MH
961 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
962 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
963 break;
964
965 case HCISETSCOMTU:
e4e8e37c
MH
966 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
967 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
968 break;
969
970 default:
971 err = -EINVAL;
972 break;
973 }
e4e8e37c 974
1da177e4
LT
975 hci_dev_put(hdev);
976 return err;
977}
978
979int hci_get_dev_list(void __user *arg)
980{
8035ded4 981 struct hci_dev *hdev;
1da177e4
LT
982 struct hci_dev_list_req *dl;
983 struct hci_dev_req *dr;
1da177e4
LT
984 int n = 0, size, err;
985 __u16 dev_num;
986
987 if (get_user(dev_num, (__u16 __user *) arg))
988 return -EFAULT;
989
990 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
991 return -EINVAL;
992
993 size = sizeof(*dl) + dev_num * sizeof(*dr);
994
70f23020
AE
995 dl = kzalloc(size, GFP_KERNEL);
996 if (!dl)
1da177e4
LT
997 return -ENOMEM;
998
999 dr = dl->dev_req;
1000
f20d09d5 1001 read_lock(&hci_dev_list_lock);
8035ded4 1002 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1003 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1004 cancel_delayed_work(&hdev->power_off);
c542a06c 1005
a8b2d5c2
JH
1006 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1007 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1008
1da177e4
LT
1009 (dr + n)->dev_id = hdev->id;
1010 (dr + n)->dev_opt = hdev->flags;
c542a06c 1011
1da177e4
LT
1012 if (++n >= dev_num)
1013 break;
1014 }
f20d09d5 1015 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1016
1017 dl->dev_num = n;
1018 size = sizeof(*dl) + n * sizeof(*dr);
1019
1020 err = copy_to_user(arg, dl, size);
1021 kfree(dl);
1022
1023 return err ? -EFAULT : 0;
1024}
1025
1026int hci_get_dev_info(void __user *arg)
1027{
1028 struct hci_dev *hdev;
1029 struct hci_dev_info di;
1030 int err = 0;
1031
1032 if (copy_from_user(&di, arg, sizeof(di)))
1033 return -EFAULT;
1034
70f23020
AE
1035 hdev = hci_dev_get(di.dev_id);
1036 if (!hdev)
1da177e4
LT
1037 return -ENODEV;
1038
a8b2d5c2 1039 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1040 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1041
a8b2d5c2
JH
1042 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1043 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1044
1da177e4
LT
1045 strcpy(di.name, hdev->name);
1046 di.bdaddr = hdev->bdaddr;
943da25d 1047 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1048 di.flags = hdev->flags;
1049 di.pkt_type = hdev->pkt_type;
1050 di.acl_mtu = hdev->acl_mtu;
1051 di.acl_pkts = hdev->acl_pkts;
1052 di.sco_mtu = hdev->sco_mtu;
1053 di.sco_pkts = hdev->sco_pkts;
1054 di.link_policy = hdev->link_policy;
1055 di.link_mode = hdev->link_mode;
1056
1057 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1058 memcpy(&di.features, &hdev->features, sizeof(di.features));
1059
1060 if (copy_to_user(arg, &di, sizeof(di)))
1061 err = -EFAULT;
1062
1063 hci_dev_put(hdev);
1064
1065 return err;
1066}
1067
1068/* ---- Interface to HCI drivers ---- */
1069
611b30f7
MH
1070static int hci_rfkill_set_block(void *data, bool blocked)
1071{
1072 struct hci_dev *hdev = data;
1073
1074 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1075
1076 if (!blocked)
1077 return 0;
1078
1079 hci_dev_do_close(hdev);
1080
1081 return 0;
1082}
1083
1084static const struct rfkill_ops hci_rfkill_ops = {
1085 .set_block = hci_rfkill_set_block,
1086};
1087
1da177e4
LT
1088/* Alloc HCI device */
1089struct hci_dev *hci_alloc_dev(void)
1090{
1091 struct hci_dev *hdev;
1092
25ea6db0 1093 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1094 if (!hdev)
1095 return NULL;
1096
0ac7e700 1097 hci_init_sysfs(hdev);
1da177e4
LT
1098 skb_queue_head_init(&hdev->driver_init);
1099
1100 return hdev;
1101}
1102EXPORT_SYMBOL(hci_alloc_dev);
1103
1104/* Free HCI device */
1105void hci_free_dev(struct hci_dev *hdev)
1106{
1107 skb_queue_purge(&hdev->driver_init);
1108
a91f2e39
MH
1109 /* will free via device release */
1110 put_device(&hdev->dev);
1da177e4
LT
1111}
1112EXPORT_SYMBOL(hci_free_dev);
1113
ab81cbf9
JH
1114static void hci_power_on(struct work_struct *work)
1115{
1116 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1117
1118 BT_DBG("%s", hdev->name);
1119
1120 if (hci_dev_open(hdev->id) < 0)
1121 return;
1122
a8b2d5c2 1123 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
80b7ab33 1124 schedule_delayed_work(&hdev->power_off,
3243553f 1125 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9 1126
a8b2d5c2 1127 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1128 mgmt_index_added(hdev);
ab81cbf9
JH
1129}
1130
1131static void hci_power_off(struct work_struct *work)
1132{
3243553f
JH
1133 struct hci_dev *hdev = container_of(work, struct hci_dev,
1134 power_off.work);
ab81cbf9
JH
1135
1136 BT_DBG("%s", hdev->name);
1137
8ee56540 1138 hci_dev_do_close(hdev);
ab81cbf9
JH
1139}
1140
16ab91ab
JH
1141static void hci_discov_off(struct work_struct *work)
1142{
1143 struct hci_dev *hdev;
1144 u8 scan = SCAN_PAGE;
1145
1146 hdev = container_of(work, struct hci_dev, discov_off.work);
1147
1148 BT_DBG("%s", hdev->name);
1149
09fd0de5 1150 hci_dev_lock(hdev);
16ab91ab
JH
1151
1152 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1153
1154 hdev->discov_timeout = 0;
1155
09fd0de5 1156 hci_dev_unlock(hdev);
16ab91ab
JH
1157}
1158
2aeb9a1a
JH
1159int hci_uuids_clear(struct hci_dev *hdev)
1160{
1161 struct list_head *p, *n;
1162
1163 list_for_each_safe(p, n, &hdev->uuids) {
1164 struct bt_uuid *uuid;
1165
1166 uuid = list_entry(p, struct bt_uuid, list);
1167
1168 list_del(p);
1169 kfree(uuid);
1170 }
1171
1172 return 0;
1173}
1174
55ed8ca1
JH
1175int hci_link_keys_clear(struct hci_dev *hdev)
1176{
1177 struct list_head *p, *n;
1178
1179 list_for_each_safe(p, n, &hdev->link_keys) {
1180 struct link_key *key;
1181
1182 key = list_entry(p, struct link_key, list);
1183
1184 list_del(p);
1185 kfree(key);
1186 }
1187
1188 return 0;
1189}
1190
b899efaf
VCG
1191int hci_smp_ltks_clear(struct hci_dev *hdev)
1192{
1193 struct smp_ltk *k, *tmp;
1194
1195 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1196 list_del(&k->list);
1197 kfree(k);
1198 }
1199
1200 return 0;
1201}
1202
55ed8ca1
JH
1203struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1204{
8035ded4 1205 struct link_key *k;
55ed8ca1 1206
8035ded4 1207 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1208 if (bacmp(bdaddr, &k->bdaddr) == 0)
1209 return k;
55ed8ca1
JH
1210
1211 return NULL;
1212}
1213
d25e28ab
JH
1214static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1215 u8 key_type, u8 old_key_type)
1216{
1217 /* Legacy key */
1218 if (key_type < 0x03)
1219 return 1;
1220
1221 /* Debug keys are insecure so don't store them persistently */
1222 if (key_type == HCI_LK_DEBUG_COMBINATION)
1223 return 0;
1224
1225 /* Changed combination key and there's no previous one */
1226 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1227 return 0;
1228
1229 /* Security mode 3 case */
1230 if (!conn)
1231 return 1;
1232
1233 /* Neither local nor remote side had no-bonding as requirement */
1234 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1235 return 1;
1236
1237 /* Local side had dedicated bonding as requirement */
1238 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1239 return 1;
1240
1241 /* Remote side had dedicated bonding as requirement */
1242 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1243 return 1;
1244
1245 /* If none of the above criteria match, then don't store the key
1246 * persistently */
1247 return 0;
1248}
1249
c9839a11 1250struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1251{
c9839a11 1252 struct smp_ltk *k;
75d262c2 1253
c9839a11
VCG
1254 list_for_each_entry(k, &hdev->long_term_keys, list) {
1255 if (k->ediv != ediv ||
1256 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1257 continue;
1258
c9839a11 1259 return k;
75d262c2
VCG
1260 }
1261
1262 return NULL;
1263}
1264EXPORT_SYMBOL(hci_find_ltk);
1265
c9839a11
VCG
1266struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1267 u8 addr_type)
75d262c2 1268{
c9839a11 1269 struct smp_ltk *k;
75d262c2 1270
c9839a11
VCG
1271 list_for_each_entry(k, &hdev->long_term_keys, list)
1272 if (addr_type == k->bdaddr_type &&
1273 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1274 return k;
1275
1276 return NULL;
1277}
c9839a11 1278EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1279
d25e28ab
JH
1280int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1281 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1282{
1283 struct link_key *key, *old_key;
4df378a1 1284 u8 old_key_type, persistent;
55ed8ca1
JH
1285
1286 old_key = hci_find_link_key(hdev, bdaddr);
1287 if (old_key) {
1288 old_key_type = old_key->type;
1289 key = old_key;
1290 } else {
12adcf3a 1291 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1292 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1293 if (!key)
1294 return -ENOMEM;
1295 list_add(&key->list, &hdev->link_keys);
1296 }
1297
1298 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1299
d25e28ab
JH
1300 /* Some buggy controller combinations generate a changed
1301 * combination key for legacy pairing even when there's no
1302 * previous key */
1303 if (type == HCI_LK_CHANGED_COMBINATION &&
1304 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1305 old_key_type == 0xff) {
d25e28ab 1306 type = HCI_LK_COMBINATION;
655fe6ec
JH
1307 if (conn)
1308 conn->key_type = type;
1309 }
d25e28ab 1310
55ed8ca1
JH
1311 bacpy(&key->bdaddr, bdaddr);
1312 memcpy(key->val, val, 16);
55ed8ca1
JH
1313 key->pin_len = pin_len;
1314
b6020ba0 1315 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1316 key->type = old_key_type;
4748fed2
JH
1317 else
1318 key->type = type;
1319
4df378a1
JH
1320 if (!new_key)
1321 return 0;
1322
1323 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1324
744cf19e 1325 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1326
1327 if (!persistent) {
1328 list_del(&key->list);
1329 kfree(key);
1330 }
55ed8ca1
JH
1331
1332 return 0;
1333}
1334
c9839a11
VCG
1335int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1336 int new_key, u8 authenticated, u8 tk[16],
1337 u8 enc_size, u16 ediv, u8 rand[8])
75d262c2 1338{
c9839a11 1339 struct smp_ltk *key, *old_key;
75d262c2 1340
c9839a11
VCG
1341 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1342 return 0;
75d262c2 1343
c9839a11
VCG
1344 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1345 if (old_key)
75d262c2 1346 key = old_key;
c9839a11
VCG
1347 else {
1348 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1349 if (!key)
1350 return -ENOMEM;
c9839a11 1351 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1352 }
1353
75d262c2 1354 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1355 key->bdaddr_type = addr_type;
1356 memcpy(key->val, tk, sizeof(key->val));
1357 key->authenticated = authenticated;
1358 key->ediv = ediv;
1359 key->enc_size = enc_size;
1360 key->type = type;
1361 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1362
c9839a11
VCG
1363 if (!new_key)
1364 return 0;
75d262c2 1365
261cc5aa
VCG
1366 if (type & HCI_SMP_LTK)
1367 mgmt_new_ltk(hdev, key, 1);
1368
75d262c2
VCG
1369 return 0;
1370}
1371
55ed8ca1
JH
1372int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1373{
1374 struct link_key *key;
1375
1376 key = hci_find_link_key(hdev, bdaddr);
1377 if (!key)
1378 return -ENOENT;
1379
1380 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1381
1382 list_del(&key->list);
1383 kfree(key);
1384
1385 return 0;
1386}
1387
b899efaf
VCG
1388int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1389{
1390 struct smp_ltk *k, *tmp;
1391
1392 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1393 if (bacmp(bdaddr, &k->bdaddr))
1394 continue;
1395
1396 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1397
1398 list_del(&k->list);
1399 kfree(k);
1400 }
1401
1402 return 0;
1403}
1404
6bd32326
VT
1405/* HCI command timer function */
1406static void hci_cmd_timer(unsigned long arg)
1407{
1408 struct hci_dev *hdev = (void *) arg;
1409
1410 BT_ERR("%s command tx timeout", hdev->name);
1411 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1412 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1413}
1414
2763eda6
SJ
1415struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1416 bdaddr_t *bdaddr)
1417{
1418 struct oob_data *data;
1419
1420 list_for_each_entry(data, &hdev->remote_oob_data, list)
1421 if (bacmp(bdaddr, &data->bdaddr) == 0)
1422 return data;
1423
1424 return NULL;
1425}
1426
1427int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1428{
1429 struct oob_data *data;
1430
1431 data = hci_find_remote_oob_data(hdev, bdaddr);
1432 if (!data)
1433 return -ENOENT;
1434
1435 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1436
1437 list_del(&data->list);
1438 kfree(data);
1439
1440 return 0;
1441}
1442
1443int hci_remote_oob_data_clear(struct hci_dev *hdev)
1444{
1445 struct oob_data *data, *n;
1446
1447 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1448 list_del(&data->list);
1449 kfree(data);
1450 }
1451
1452 return 0;
1453}
1454
1455int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1456 u8 *randomizer)
1457{
1458 struct oob_data *data;
1459
1460 data = hci_find_remote_oob_data(hdev, bdaddr);
1461
1462 if (!data) {
1463 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1464 if (!data)
1465 return -ENOMEM;
1466
1467 bacpy(&data->bdaddr, bdaddr);
1468 list_add(&data->list, &hdev->remote_oob_data);
1469 }
1470
1471 memcpy(data->hash, hash, sizeof(data->hash));
1472 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1473
1474 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1475
1476 return 0;
1477}
1478
b2a66aad
AJ
1479struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1480 bdaddr_t *bdaddr)
1481{
8035ded4 1482 struct bdaddr_list *b;
b2a66aad 1483
8035ded4 1484 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1485 if (bacmp(bdaddr, &b->bdaddr) == 0)
1486 return b;
b2a66aad
AJ
1487
1488 return NULL;
1489}
1490
1491int hci_blacklist_clear(struct hci_dev *hdev)
1492{
1493 struct list_head *p, *n;
1494
1495 list_for_each_safe(p, n, &hdev->blacklist) {
1496 struct bdaddr_list *b;
1497
1498 b = list_entry(p, struct bdaddr_list, list);
1499
1500 list_del(p);
1501 kfree(b);
1502 }
1503
1504 return 0;
1505}
1506
88c1fe4b 1507int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1508{
1509 struct bdaddr_list *entry;
b2a66aad
AJ
1510
1511 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1512 return -EBADF;
1513
5e762444
AJ
1514 if (hci_blacklist_lookup(hdev, bdaddr))
1515 return -EEXIST;
b2a66aad
AJ
1516
1517 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1518 if (!entry)
1519 return -ENOMEM;
b2a66aad
AJ
1520
1521 bacpy(&entry->bdaddr, bdaddr);
1522
1523 list_add(&entry->list, &hdev->blacklist);
1524
88c1fe4b 1525 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1526}
1527
88c1fe4b 1528int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1529{
1530 struct bdaddr_list *entry;
b2a66aad 1531
1ec918ce 1532 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1533 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1534
1535 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1536 if (!entry)
5e762444 1537 return -ENOENT;
b2a66aad
AJ
1538
1539 list_del(&entry->list);
1540 kfree(entry);
1541
88c1fe4b 1542 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1543}
1544
db323f2f 1545static void hci_clear_adv_cache(struct work_struct *work)
35815085 1546{
db323f2f
GP
1547 struct hci_dev *hdev = container_of(work, struct hci_dev,
1548 adv_work.work);
35815085
AG
1549
1550 hci_dev_lock(hdev);
1551
1552 hci_adv_entries_clear(hdev);
1553
1554 hci_dev_unlock(hdev);
1555}
1556
76c8686f
AG
1557int hci_adv_entries_clear(struct hci_dev *hdev)
1558{
1559 struct adv_entry *entry, *tmp;
1560
1561 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1562 list_del(&entry->list);
1563 kfree(entry);
1564 }
1565
1566 BT_DBG("%s adv cache cleared", hdev->name);
1567
1568 return 0;
1569}
1570
1571struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1572{
1573 struct adv_entry *entry;
1574
1575 list_for_each_entry(entry, &hdev->adv_entries, list)
1576 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1577 return entry;
1578
1579 return NULL;
1580}
1581
1582static inline int is_connectable_adv(u8 evt_type)
1583{
1584 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1585 return 1;
1586
1587 return 0;
1588}
1589
1590int hci_add_adv_entry(struct hci_dev *hdev,
1591 struct hci_ev_le_advertising_info *ev)
1592{
1593 struct adv_entry *entry;
1594
1595 if (!is_connectable_adv(ev->evt_type))
1596 return -EINVAL;
1597
1598 /* Only new entries should be added to adv_entries. So, if
1599 * bdaddr was found, don't add it. */
1600 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1601 return 0;
1602
4777bfde 1603 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
76c8686f
AG
1604 if (!entry)
1605 return -ENOMEM;
1606
1607 bacpy(&entry->bdaddr, &ev->bdaddr);
1608 entry->bdaddr_type = ev->bdaddr_type;
1609
1610 list_add(&entry->list, &hdev->adv_entries);
1611
1612 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1613 batostr(&entry->bdaddr), entry->bdaddr_type);
1614
1615 return 0;
1616}
1617
7ba8b4be
AG
1618static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1619{
1620 struct le_scan_params *param = (struct le_scan_params *) opt;
1621 struct hci_cp_le_set_scan_param cp;
1622
1623 memset(&cp, 0, sizeof(cp));
1624 cp.type = param->type;
1625 cp.interval = cpu_to_le16(param->interval);
1626 cp.window = cpu_to_le16(param->window);
1627
1628 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1629}
1630
1631static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1632{
1633 struct hci_cp_le_set_scan_enable cp;
1634
1635 memset(&cp, 0, sizeof(cp));
1636 cp.enable = 1;
1637
1638 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1639}
1640
1641static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1642 u16 window, int timeout)
1643{
1644 long timeo = msecs_to_jiffies(3000);
1645 struct le_scan_params param;
1646 int err;
1647
1648 BT_DBG("%s", hdev->name);
1649
1650 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1651 return -EINPROGRESS;
1652
1653 param.type = type;
1654 param.interval = interval;
1655 param.window = window;
1656
1657 hci_req_lock(hdev);
1658
1659 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1660 timeo);
1661 if (!err)
1662 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1663
1664 hci_req_unlock(hdev);
1665
1666 if (err < 0)
1667 return err;
1668
1669 schedule_delayed_work(&hdev->le_scan_disable,
1670 msecs_to_jiffies(timeout));
1671
1672 return 0;
1673}
1674
1675static void le_scan_disable_work(struct work_struct *work)
1676{
1677 struct hci_dev *hdev = container_of(work, struct hci_dev,
1678 le_scan_disable.work);
1679 struct hci_cp_le_set_scan_enable cp;
1680
1681 BT_DBG("%s", hdev->name);
1682
1683 memset(&cp, 0, sizeof(cp));
1684
1685 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1686}
1687
28b75a89
AG
1688static void le_scan_work(struct work_struct *work)
1689{
1690 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1691 struct le_scan_params *param = &hdev->le_scan_params;
1692
1693 BT_DBG("%s", hdev->name);
1694
1695 hci_do_le_scan(hdev, param->type, param->interval,
1696 param->window, param->timeout);
1697}
1698
1699int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1700 int timeout)
1701{
1702 struct le_scan_params *param = &hdev->le_scan_params;
1703
1704 BT_DBG("%s", hdev->name);
1705
1706 if (work_busy(&hdev->le_scan))
1707 return -EINPROGRESS;
1708
1709 param->type = type;
1710 param->interval = interval;
1711 param->window = window;
1712 param->timeout = timeout;
1713
1714 queue_work(system_long_wq, &hdev->le_scan);
1715
1716 return 0;
1717}
1718
1da177e4
LT
1719/* Register HCI device */
1720int hci_register_dev(struct hci_dev *hdev)
1721{
1722 struct list_head *head = &hci_dev_list, *p;
08add513 1723 int i, id, error;
1da177e4 1724
e9b9cfa1 1725 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1726
010666a1 1727 if (!hdev->open || !hdev->close)
1da177e4
LT
1728 return -EINVAL;
1729
08add513
MM
1730 /* Do not allow HCI_AMP devices to register at index 0,
1731 * so the index can be used as the AMP controller ID.
1732 */
1733 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1734
f20d09d5 1735 write_lock(&hci_dev_list_lock);
1da177e4
LT
1736
1737 /* Find first available device id */
1738 list_for_each(p, &hci_dev_list) {
1739 if (list_entry(p, struct hci_dev, list)->id != id)
1740 break;
1741 head = p; id++;
1742 }
8e87d142 1743
1da177e4
LT
1744 sprintf(hdev->name, "hci%d", id);
1745 hdev->id = id;
c6feeb28 1746 list_add_tail(&hdev->list, head);
1da177e4 1747
09fd0de5 1748 mutex_init(&hdev->lock);
1da177e4
LT
1749
1750 hdev->flags = 0;
d23264a8 1751 hdev->dev_flags = 0;
1da177e4 1752 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1753 hdev->esco_type = (ESCO_HV1);
1da177e4 1754 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1755 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1756
04837f64
MH
1757 hdev->idle_timeout = 0;
1758 hdev->sniff_max_interval = 800;
1759 hdev->sniff_min_interval = 80;
1760
b78752cc 1761 INIT_WORK(&hdev->rx_work, hci_rx_work);
c347b765 1762 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3eff45ea 1763 INIT_WORK(&hdev->tx_work, hci_tx_work);
b78752cc 1764
1da177e4
LT
1765
1766 skb_queue_head_init(&hdev->rx_q);
1767 skb_queue_head_init(&hdev->cmd_q);
1768 skb_queue_head_init(&hdev->raw_q);
1769
6bd32326
VT
1770 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1771
cd4c5391 1772 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1773 hdev->reassembly[i] = NULL;
1774
1da177e4 1775 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1776 mutex_init(&hdev->req_lock);
1da177e4 1777
30883512 1778 discovery_init(hdev);
1da177e4
LT
1779
1780 hci_conn_hash_init(hdev);
1781
2e58ef3e
JH
1782 INIT_LIST_HEAD(&hdev->mgmt_pending);
1783
ea4bd8ba 1784 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1785
2aeb9a1a
JH
1786 INIT_LIST_HEAD(&hdev->uuids);
1787
55ed8ca1 1788 INIT_LIST_HEAD(&hdev->link_keys);
b899efaf 1789 INIT_LIST_HEAD(&hdev->long_term_keys);
55ed8ca1 1790
2763eda6
SJ
1791 INIT_LIST_HEAD(&hdev->remote_oob_data);
1792
76c8686f
AG
1793 INIT_LIST_HEAD(&hdev->adv_entries);
1794
db323f2f 1795 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
ab81cbf9 1796 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1797 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1798
16ab91ab
JH
1799 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1800
1da177e4
LT
1801 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1802
1803 atomic_set(&hdev->promisc, 0);
1804
28b75a89
AG
1805 INIT_WORK(&hdev->le_scan, le_scan_work);
1806
7ba8b4be
AG
1807 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1808
f20d09d5 1809 write_unlock(&hci_dev_list_lock);
1da177e4 1810
32845eb1
GP
1811 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1812 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1813 if (!hdev->workqueue) {
1814 error = -ENOMEM;
1815 goto err;
1816 }
f48fd9c8 1817
33ca954d
DH
1818 error = hci_add_sysfs(hdev);
1819 if (error < 0)
1820 goto err_wqueue;
1da177e4 1821
611b30f7
MH
1822 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1823 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1824 if (hdev->rfkill) {
1825 if (rfkill_register(hdev->rfkill) < 0) {
1826 rfkill_destroy(hdev->rfkill);
1827 hdev->rfkill = NULL;
1828 }
1829 }
1830
a8b2d5c2
JH
1831 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1832 set_bit(HCI_SETUP, &hdev->dev_flags);
7f971041 1833 schedule_work(&hdev->power_on);
ab81cbf9 1834
1da177e4 1835 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 1836 hci_dev_hold(hdev);
1da177e4
LT
1837
1838 return id;
f48fd9c8 1839
33ca954d
DH
1840err_wqueue:
1841 destroy_workqueue(hdev->workqueue);
1842err:
f20d09d5 1843 write_lock(&hci_dev_list_lock);
f48fd9c8 1844 list_del(&hdev->list);
f20d09d5 1845 write_unlock(&hci_dev_list_lock);
f48fd9c8 1846
33ca954d 1847 return error;
1da177e4
LT
1848}
1849EXPORT_SYMBOL(hci_register_dev);
1850
1851/* Unregister HCI device */
59735631 1852void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1853{
ef222013
MH
1854 int i;
1855
c13854ce 1856 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1857
f20d09d5 1858 write_lock(&hci_dev_list_lock);
1da177e4 1859 list_del(&hdev->list);
f20d09d5 1860 write_unlock(&hci_dev_list_lock);
1da177e4
LT
1861
1862 hci_dev_do_close(hdev);
1863
cd4c5391 1864 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1865 kfree_skb(hdev->reassembly[i]);
1866
ab81cbf9 1867 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8b2d5c2 1868 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 1869 hci_dev_lock(hdev);
744cf19e 1870 mgmt_index_removed(hdev);
09fd0de5 1871 hci_dev_unlock(hdev);
56e5cb86 1872 }
ab81cbf9 1873
2e58ef3e
JH
1874 /* mgmt_index_removed should take care of emptying the
1875 * pending list */
1876 BUG_ON(!list_empty(&hdev->mgmt_pending));
1877
1da177e4
LT
1878 hci_notify(hdev, HCI_DEV_UNREG);
1879
611b30f7
MH
1880 if (hdev->rfkill) {
1881 rfkill_unregister(hdev->rfkill);
1882 rfkill_destroy(hdev->rfkill);
1883 }
1884
ce242970 1885 hci_del_sysfs(hdev);
147e2d59 1886
db323f2f 1887 cancel_delayed_work_sync(&hdev->adv_work);
c6f3c5f7 1888
f48fd9c8
MH
1889 destroy_workqueue(hdev->workqueue);
1890
09fd0de5 1891 hci_dev_lock(hdev);
e2e0cacb 1892 hci_blacklist_clear(hdev);
2aeb9a1a 1893 hci_uuids_clear(hdev);
55ed8ca1 1894 hci_link_keys_clear(hdev);
b899efaf 1895 hci_smp_ltks_clear(hdev);
2763eda6 1896 hci_remote_oob_data_clear(hdev);
76c8686f 1897 hci_adv_entries_clear(hdev);
09fd0de5 1898 hci_dev_unlock(hdev);
e2e0cacb 1899
dc946bd8 1900 hci_dev_put(hdev);
1da177e4
LT
1901}
1902EXPORT_SYMBOL(hci_unregister_dev);
1903
1904/* Suspend HCI device */
1905int hci_suspend_dev(struct hci_dev *hdev)
1906{
1907 hci_notify(hdev, HCI_DEV_SUSPEND);
1908 return 0;
1909}
1910EXPORT_SYMBOL(hci_suspend_dev);
1911
1912/* Resume HCI device */
1913int hci_resume_dev(struct hci_dev *hdev)
1914{
1915 hci_notify(hdev, HCI_DEV_RESUME);
1916 return 0;
1917}
1918EXPORT_SYMBOL(hci_resume_dev);
1919
76bca880
MH
1920/* Receive frame from HCI drivers */
1921int hci_recv_frame(struct sk_buff *skb)
1922{
1923 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1924 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1925 && !test_bit(HCI_INIT, &hdev->flags))) {
1926 kfree_skb(skb);
1927 return -ENXIO;
1928 }
1929
1930 /* Incomming skb */
1931 bt_cb(skb)->incoming = 1;
1932
1933 /* Time stamp */
1934 __net_timestamp(skb);
1935
76bca880 1936 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1937 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1938
76bca880
MH
1939 return 0;
1940}
1941EXPORT_SYMBOL(hci_recv_frame);
1942
33e882a5 1943static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1944 int count, __u8 index)
33e882a5
SS
1945{
1946 int len = 0;
1947 int hlen = 0;
1948 int remain = count;
1949 struct sk_buff *skb;
1950 struct bt_skb_cb *scb;
1951
1952 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1953 index >= NUM_REASSEMBLY)
1954 return -EILSEQ;
1955
1956 skb = hdev->reassembly[index];
1957
1958 if (!skb) {
1959 switch (type) {
1960 case HCI_ACLDATA_PKT:
1961 len = HCI_MAX_FRAME_SIZE;
1962 hlen = HCI_ACL_HDR_SIZE;
1963 break;
1964 case HCI_EVENT_PKT:
1965 len = HCI_MAX_EVENT_SIZE;
1966 hlen = HCI_EVENT_HDR_SIZE;
1967 break;
1968 case HCI_SCODATA_PKT:
1969 len = HCI_MAX_SCO_SIZE;
1970 hlen = HCI_SCO_HDR_SIZE;
1971 break;
1972 }
1973
1e429f38 1974 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1975 if (!skb)
1976 return -ENOMEM;
1977
1978 scb = (void *) skb->cb;
1979 scb->expect = hlen;
1980 scb->pkt_type = type;
1981
1982 skb->dev = (void *) hdev;
1983 hdev->reassembly[index] = skb;
1984 }
1985
1986 while (count) {
1987 scb = (void *) skb->cb;
89bb46d0 1988 len = min_t(uint, scb->expect, count);
33e882a5
SS
1989
1990 memcpy(skb_put(skb, len), data, len);
1991
1992 count -= len;
1993 data += len;
1994 scb->expect -= len;
1995 remain = count;
1996
1997 switch (type) {
1998 case HCI_EVENT_PKT:
1999 if (skb->len == HCI_EVENT_HDR_SIZE) {
2000 struct hci_event_hdr *h = hci_event_hdr(skb);
2001 scb->expect = h->plen;
2002
2003 if (skb_tailroom(skb) < scb->expect) {
2004 kfree_skb(skb);
2005 hdev->reassembly[index] = NULL;
2006 return -ENOMEM;
2007 }
2008 }
2009 break;
2010
2011 case HCI_ACLDATA_PKT:
2012 if (skb->len == HCI_ACL_HDR_SIZE) {
2013 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2014 scb->expect = __le16_to_cpu(h->dlen);
2015
2016 if (skb_tailroom(skb) < scb->expect) {
2017 kfree_skb(skb);
2018 hdev->reassembly[index] = NULL;
2019 return -ENOMEM;
2020 }
2021 }
2022 break;
2023
2024 case HCI_SCODATA_PKT:
2025 if (skb->len == HCI_SCO_HDR_SIZE) {
2026 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2027 scb->expect = h->dlen;
2028
2029 if (skb_tailroom(skb) < scb->expect) {
2030 kfree_skb(skb);
2031 hdev->reassembly[index] = NULL;
2032 return -ENOMEM;
2033 }
2034 }
2035 break;
2036 }
2037
2038 if (scb->expect == 0) {
2039 /* Complete frame */
2040
2041 bt_cb(skb)->pkt_type = type;
2042 hci_recv_frame(skb);
2043
2044 hdev->reassembly[index] = NULL;
2045 return remain;
2046 }
2047 }
2048
2049 return remain;
2050}
2051
ef222013
MH
2052int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2053{
f39a3c06
SS
2054 int rem = 0;
2055
ef222013
MH
2056 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2057 return -EILSEQ;
2058
da5f6c37 2059 while (count) {
1e429f38 2060 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2061 if (rem < 0)
2062 return rem;
ef222013 2063
f39a3c06
SS
2064 data += (count - rem);
2065 count = rem;
f81c6224 2066 }
ef222013 2067
f39a3c06 2068 return rem;
ef222013
MH
2069}
2070EXPORT_SYMBOL(hci_recv_fragment);
2071
99811510
SS
2072#define STREAM_REASSEMBLY 0
2073
2074int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2075{
2076 int type;
2077 int rem = 0;
2078
da5f6c37 2079 while (count) {
99811510
SS
2080 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2081
2082 if (!skb) {
2083 struct { char type; } *pkt;
2084
2085 /* Start of the frame */
2086 pkt = data;
2087 type = pkt->type;
2088
2089 data++;
2090 count--;
2091 } else
2092 type = bt_cb(skb)->pkt_type;
2093
1e429f38
GP
2094 rem = hci_reassembly(hdev, type, data, count,
2095 STREAM_REASSEMBLY);
99811510
SS
2096 if (rem < 0)
2097 return rem;
2098
2099 data += (count - rem);
2100 count = rem;
f81c6224 2101 }
99811510
SS
2102
2103 return rem;
2104}
2105EXPORT_SYMBOL(hci_recv_stream_fragment);
2106
1da177e4
LT
2107/* ---- Interface to upper protocols ---- */
2108
1da177e4
LT
2109int hci_register_cb(struct hci_cb *cb)
2110{
2111 BT_DBG("%p name %s", cb, cb->name);
2112
f20d09d5 2113 write_lock(&hci_cb_list_lock);
1da177e4 2114 list_add(&cb->list, &hci_cb_list);
f20d09d5 2115 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2116
2117 return 0;
2118}
2119EXPORT_SYMBOL(hci_register_cb);
2120
2121int hci_unregister_cb(struct hci_cb *cb)
2122{
2123 BT_DBG("%p name %s", cb, cb->name);
2124
f20d09d5 2125 write_lock(&hci_cb_list_lock);
1da177e4 2126 list_del(&cb->list);
f20d09d5 2127 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2128
2129 return 0;
2130}
2131EXPORT_SYMBOL(hci_unregister_cb);
2132
2133static int hci_send_frame(struct sk_buff *skb)
2134{
2135 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2136
2137 if (!hdev) {
2138 kfree_skb(skb);
2139 return -ENODEV;
2140 }
2141
0d48d939 2142 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2143
cd82e61c
MH
2144 /* Time stamp */
2145 __net_timestamp(skb);
1da177e4 2146
cd82e61c
MH
2147 /* Send copy to monitor */
2148 hci_send_to_monitor(hdev, skb);
2149
2150 if (atomic_read(&hdev->promisc)) {
2151 /* Send copy to the sockets */
470fe1b5 2152 hci_send_to_sock(hdev, skb);
1da177e4
LT
2153 }
2154
2155 /* Get rid of skb owner, prior to sending to the driver. */
2156 skb_orphan(skb);
2157
2158 return hdev->send(skb);
2159}
2160
2161/* Send HCI command */
a9de9248 2162int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
2163{
2164 int len = HCI_COMMAND_HDR_SIZE + plen;
2165 struct hci_command_hdr *hdr;
2166 struct sk_buff *skb;
2167
a9de9248 2168 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
2169
2170 skb = bt_skb_alloc(len, GFP_ATOMIC);
2171 if (!skb) {
ef222013 2172 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
2173 return -ENOMEM;
2174 }
2175
2176 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2177 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2178 hdr->plen = plen;
2179
2180 if (plen)
2181 memcpy(skb_put(skb, plen), param, plen);
2182
2183 BT_DBG("skb len %d", skb->len);
2184
0d48d939 2185 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2186 skb->dev = (void *) hdev;
c78ae283 2187
a5040efa
JH
2188 if (test_bit(HCI_INIT, &hdev->flags))
2189 hdev->init_last_cmd = opcode;
2190
1da177e4 2191 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2192 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2193
2194 return 0;
2195}
1da177e4
LT
2196
2197/* Get data from the previously sent command */
a9de9248 2198void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2199{
2200 struct hci_command_hdr *hdr;
2201
2202 if (!hdev->sent_cmd)
2203 return NULL;
2204
2205 hdr = (void *) hdev->sent_cmd->data;
2206
a9de9248 2207 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2208 return NULL;
2209
a9de9248 2210 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2211
2212 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2213}
2214
2215/* Send ACL data */
2216static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2217{
2218 struct hci_acl_hdr *hdr;
2219 int len = skb->len;
2220
badff6d0
ACM
2221 skb_push(skb, HCI_ACL_HDR_SIZE);
2222 skb_reset_transport_header(skb);
9c70220b 2223 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2224 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2225 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2226}
2227
73d80deb
LAD
2228static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2229 struct sk_buff *skb, __u16 flags)
1da177e4
LT
2230{
2231 struct hci_dev *hdev = conn->hdev;
2232 struct sk_buff *list;
2233
70f23020
AE
2234 list = skb_shinfo(skb)->frag_list;
2235 if (!list) {
1da177e4
LT
2236 /* Non fragmented */
2237 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2238
73d80deb 2239 skb_queue_tail(queue, skb);
1da177e4
LT
2240 } else {
2241 /* Fragmented */
2242 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2243
2244 skb_shinfo(skb)->frag_list = NULL;
2245
2246 /* Queue all fragments atomically */
af3e6359 2247 spin_lock(&queue->lock);
1da177e4 2248
73d80deb 2249 __skb_queue_tail(queue, skb);
e702112f
AE
2250
2251 flags &= ~ACL_START;
2252 flags |= ACL_CONT;
1da177e4
LT
2253 do {
2254 skb = list; list = list->next;
8e87d142 2255
1da177e4 2256 skb->dev = (void *) hdev;
0d48d939 2257 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2258 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2259
2260 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2261
73d80deb 2262 __skb_queue_tail(queue, skb);
1da177e4
LT
2263 } while (list);
2264
af3e6359 2265 spin_unlock(&queue->lock);
1da177e4 2266 }
73d80deb
LAD
2267}
2268
2269void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2270{
2271 struct hci_conn *conn = chan->conn;
2272 struct hci_dev *hdev = conn->hdev;
2273
2274 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2275
2276 skb->dev = (void *) hdev;
2277 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2278 hci_add_acl_hdr(skb, conn->handle, flags);
2279
2280 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2281
3eff45ea 2282 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2283}
2284EXPORT_SYMBOL(hci_send_acl);
2285
2286/* Send SCO data */
0d861d8b 2287void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2288{
2289 struct hci_dev *hdev = conn->hdev;
2290 struct hci_sco_hdr hdr;
2291
2292 BT_DBG("%s len %d", hdev->name, skb->len);
2293
aca3192c 2294 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2295 hdr.dlen = skb->len;
2296
badff6d0
ACM
2297 skb_push(skb, HCI_SCO_HDR_SIZE);
2298 skb_reset_transport_header(skb);
9c70220b 2299 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2300
2301 skb->dev = (void *) hdev;
0d48d939 2302 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2303
1da177e4 2304 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2305 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2306}
2307EXPORT_SYMBOL(hci_send_sco);
2308
2309/* ---- HCI TX task (outgoing data) ---- */
2310
2311/* HCI Connection scheduler */
2312static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2313{
2314 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2315 struct hci_conn *conn = NULL, *c;
1da177e4 2316 int num = 0, min = ~0;
1da177e4 2317
8e87d142 2318 /* We don't have to lock device here. Connections are always
1da177e4 2319 * added and removed with TX task disabled. */
bf4c6325
GP
2320
2321 rcu_read_lock();
2322
2323 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2324 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2325 continue;
769be974
MH
2326
2327 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2328 continue;
2329
1da177e4
LT
2330 num++;
2331
2332 if (c->sent < min) {
2333 min = c->sent;
2334 conn = c;
2335 }
52087a79
LAD
2336
2337 if (hci_conn_num(hdev, type) == num)
2338 break;
1da177e4
LT
2339 }
2340
bf4c6325
GP
2341 rcu_read_unlock();
2342
1da177e4 2343 if (conn) {
6ed58ec5
VT
2344 int cnt, q;
2345
2346 switch (conn->type) {
2347 case ACL_LINK:
2348 cnt = hdev->acl_cnt;
2349 break;
2350 case SCO_LINK:
2351 case ESCO_LINK:
2352 cnt = hdev->sco_cnt;
2353 break;
2354 case LE_LINK:
2355 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2356 break;
2357 default:
2358 cnt = 0;
2359 BT_ERR("Unknown link type");
2360 }
2361
2362 q = cnt / num;
1da177e4
LT
2363 *quote = q ? q : 1;
2364 } else
2365 *quote = 0;
2366
2367 BT_DBG("conn %p quote %d", conn, *quote);
2368 return conn;
2369}
2370
bae1f5d9 2371static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2372{
2373 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2374 struct hci_conn *c;
1da177e4 2375
bae1f5d9 2376 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2377
bf4c6325
GP
2378 rcu_read_lock();
2379
1da177e4 2380 /* Kill stalled connections */
bf4c6325 2381 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2382 if (c->type == type && c->sent) {
2383 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2384 hdev->name, batostr(&c->dst));
2385 hci_acl_disconn(c, 0x13);
2386 }
2387 }
bf4c6325
GP
2388
2389 rcu_read_unlock();
1da177e4
LT
2390}
2391
73d80deb
LAD
/* HCI channel scheduler: among all channels on connections of the given
 * link type, select the channel whose head packet has the highest
 * priority; ties are broken in favour of the connection with the fewest
 * packets in flight.  On success *quote is set to the channel's fair
 * share of the controller's free buffers (at least 1).  Returns NULL
 * when no channel has data queued. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the priority of the head packet matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Strictly higher priority found: restart the
			 * selection among channels at this new priority */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, prefer the connection
			 * with the fewest packets in flight */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type have been visited */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer budget depends on the selected link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share per competing channel, at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2470
02b20f0b
LAD
/* Priority aging: after a TX round, promote the head packet of every
 * channel that did not get to send anything (chan->sent == 0) to
 * priority HCI_PRIO_MAX - 1 so low-priority traffic cannot be starved
 * forever.  Channels that did send only get their per-round counter
 * reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel transmitted this round: just reset
			 * its counter and leave its priorities alone */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2520
b71d385a
AE
/* Number of controller data blocks consumed by one ACL frame, i.e. the
 * payload (without the ACL header) rounded up to whole blocks.  Used
 * only in block-based flow control mode. */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2526
63d2bc1b 2527static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2528{
1da177e4
LT
2529 if (!test_bit(HCI_RAW, &hdev->flags)) {
2530 /* ACL tx timeout must be longer than maximum
2531 * link supervision timeout (40.9 seconds) */
63d2bc1b 2532 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
cc48dc0a 2533 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2534 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2535 }
63d2bc1b 2536}
1da177e4 2537
63d2bc1b
AE
/* Send queued ACL data using packet-based flow control: repeatedly pick
 * the best channel via hci_chan_sent() and drain it while controller
 * buffers (acl_cnt) and the per-channel quota allow, then run the
 * priority aging pass if anything was sent. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head packet when this channel was picked */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed per packet */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: age priorities of the starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2575
b71d385a
AE
/* Send queued ACL data using block-based flow control: like
 * hci_sched_acl_pkt(), but every frame consumes __get_blocks()
 * controller blocks from block_cnt instead of one packet slot. */
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head packet when this channel was picked */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
						skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Frame needs more blocks than remain this round.
			 * NOTE(review): on this path the dequeued skb is
			 * neither sent, requeued nor freed — looks like a
			 * leak; confirm against upstream history. */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Account in blocks, not packets */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: age priorities of the starved channels */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2621
2622static inline void hci_sched_acl(struct hci_dev *hdev)
2623{
2624 BT_DBG("%s", hdev->name);
2625
2626 if (!hci_conn_num(hdev, ACL_LINK))
2627 return;
2628
2629 switch (hdev->flow_ctl_mode) {
2630 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2631 hci_sched_acl_pkt(hdev);
2632 break;
2633
2634 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2635 hci_sched_acl_blk(hdev);
2636 break;
2637 }
2638}
2639
1da177e4
LT
2640/* Schedule SCO */
2641static inline void hci_sched_sco(struct hci_dev *hdev)
2642{
2643 struct hci_conn *conn;
2644 struct sk_buff *skb;
2645 int quote;
2646
2647 BT_DBG("%s", hdev->name);
2648
52087a79
LAD
2649 if (!hci_conn_num(hdev, SCO_LINK))
2650 return;
2651
1da177e4
LT
2652 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2653 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2654 BT_DBG("skb %p len %d", skb, skb->len);
2655 hci_send_frame(skb);
2656
2657 conn->sent++;
2658 if (conn->sent == ~0)
2659 conn->sent = 0;
2660 }
2661 }
2662}
2663
b6a0dc82
MH
2664static inline void hci_sched_esco(struct hci_dev *hdev)
2665{
2666 struct hci_conn *conn;
2667 struct sk_buff *skb;
2668 int quote;
2669
2670 BT_DBG("%s", hdev->name);
2671
52087a79
LAD
2672 if (!hci_conn_num(hdev, ESCO_LINK))
2673 return;
2674
b6a0dc82
MH
2675 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2676 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2677 BT_DBG("skb %p len %d", skb, skb->len);
2678 hci_send_frame(skb);
2679
2680 conn->sent++;
2681 if (conn->sent == ~0)
2682 conn->sent = 0;
2683 }
2684 }
2685}
2686
6ed58ec5
VT
/* Schedule LE data.  Controllers with a dedicated LE buffer pool use
 * le_cnt/le_pkts; otherwise LE traffic shares the ACL pool (acl_cnt). */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE pool when present, else fall back to the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head packet when this channel was picked */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: age priorities of the starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2737
3eff45ea 2738static void hci_tx_work(struct work_struct *work)
1da177e4 2739{
3eff45ea 2740 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2741 struct sk_buff *skb;
2742
6ed58ec5
VT
2743 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2744 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2745
2746 /* Schedule queues and send stuff to HCI driver */
2747
2748 hci_sched_acl(hdev);
2749
2750 hci_sched_sco(hdev);
2751
b6a0dc82
MH
2752 hci_sched_esco(hdev);
2753
6ed58ec5
VT
2754 hci_sched_le(hdev);
2755
1da177e4
LT
2756 /* Send next queued raw (unknown type) packet */
2757 while ((skb = skb_dequeue(&hdev->raw_q)))
2758 hci_send_frame(skb);
1da177e4
LT
2759}
2760
25985edc 2761/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2762
2763/* ACL data packet */
2764static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2765{
2766 struct hci_acl_hdr *hdr = (void *) skb->data;
2767 struct hci_conn *conn;
2768 __u16 handle, flags;
2769
2770 skb_pull(skb, HCI_ACL_HDR_SIZE);
2771
2772 handle = __le16_to_cpu(hdr->handle);
2773 flags = hci_flags(handle);
2774 handle = hci_handle(handle);
2775
2776 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2777
2778 hdev->stat.acl_rx++;
2779
2780 hci_dev_lock(hdev);
2781 conn = hci_conn_hash_lookup_handle(hdev, handle);
2782 hci_dev_unlock(hdev);
8e87d142 2783
1da177e4 2784 if (conn) {
65983fc7 2785 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2786
1da177e4 2787 /* Send to upper protocol */
686ebf28
UF
2788 l2cap_recv_acldata(conn, skb, flags);
2789 return;
1da177e4 2790 } else {
8e87d142 2791 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2792 hdev->name, handle);
2793 }
2794
2795 kfree_skb(skb);
2796}
2797
2798/* SCO data packet */
2799static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2800{
2801 struct hci_sco_hdr *hdr = (void *) skb->data;
2802 struct hci_conn *conn;
2803 __u16 handle;
2804
2805 skb_pull(skb, HCI_SCO_HDR_SIZE);
2806
2807 handle = __le16_to_cpu(hdr->handle);
2808
2809 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2810
2811 hdev->stat.sco_rx++;
2812
2813 hci_dev_lock(hdev);
2814 conn = hci_conn_hash_lookup_handle(hdev, handle);
2815 hci_dev_unlock(hdev);
2816
2817 if (conn) {
1da177e4 2818 /* Send to upper protocol */
686ebf28
UF
2819 sco_recv_scodata(conn, skb);
2820 return;
1da177e4 2821 } else {
8e87d142 2822 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2823 hdev->name, handle);
2824 }
2825
2826 kfree_skb(skb);
2827}
2828
/* RX worker: drain hdev->rx_q, mirroring traffic to the monitor and
 * (in promiscuous mode) to raw sockets, then dispatch each frame by
 * its packet type.  Frames with no handler are dropped. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
2883
/* CMD worker: send the next queued HCI command when the controller has
 * a free command slot (cmd_cnt), keeping a clone in hdev->sent_cmd for
 * the response handlers and arming the command timeout timer. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While resetting, the command timeout is disarmed */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue the command at the head and
			 * reschedule ourselves to retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2914
2915int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2916{
2917 /* General inquiry access code (GIAC) */
2918 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2919 struct hci_cp_inquiry cp;
2920
2921 BT_DBG("%s", hdev->name);
2922
2923 if (test_bit(HCI_INQUIRY, &hdev->flags))
2924 return -EINPROGRESS;
2925
4663262c
JH
2926 inquiry_cache_flush(hdev);
2927
2519a1fc
AG
2928 memset(&cp, 0, sizeof(cp));
2929 memcpy(&cp.lap, lap, sizeof(cp.lap));
2930 cp.length = length;
2931
2932 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2933}
023d5049
AG
2934
2935int hci_cancel_inquiry(struct hci_dev *hdev)
2936{
2937 BT_DBG("%s", hdev->name);
2938
2939 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2940 return -EPERM;
2941
2942 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2943}