Bluetooth: mgmt: Fix missing connect failed event for LE
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4 42#include <linux/interrupt.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
70f23020 48#include <linux/uaccess.h>
1da177e4
LT
49#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
ab81cbf9
JH
54#define AUTO_OFF_TIMEOUT 2000
55
b78752cc 56static void hci_rx_work(struct work_struct *work);
c347b765 57static void hci_cmd_work(struct work_struct *work);
3eff45ea 58static void hci_tx_work(struct work_struct *work);
1da177e4 59
1da177e4
LT
60/* HCI device list */
61LIST_HEAD(hci_dev_list);
62DEFINE_RWLOCK(hci_dev_list_lock);
63
64/* HCI callback list */
65LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock);
67
1da177e4
LT
68/* ---- HCI notifications ---- */
69
/* Forward a device-level event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the
 * HCI socket layer so monitoring sockets see device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
/* Called from the event path when a command completes; wakes up any
 * synchronous request waiting in __hci_request().
 *
 * @cmd:    opcode of the command that completed
 * @result: HCI status code of the completion
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only the spontaneous-reset case is handled: the stray
		 * completion must be for HCI_OP_RESET while the command we
		 * actually sent was something else. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Requeue the last sent command at the head so it is
		 * retried immediately; allocation failure is silently
		 * tolerated (init will then time out). */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Complete the pending synchronous request, if any. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
/* Execute request and wait for completion. */
/* Runs @req (which issues one or more HCI commands) and sleeps until
 * hci_req_complete()/hci_req_cancel() signals completion, the timeout
 * expires, or a signal arrives.  Caller must hold the request lock.
 *
 * Returns 0 on success, negative errno on failure/timeout/interrupt.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue and mark interruptible BEFORE issuing
	 * the request, so a completion racing with us is not lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status byte into a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno value. */
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: schedule_timeout() expired. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 170 unsigned long opt, __u32 timeout)
1da177e4
LT
171{
172 int ret;
173
7c6a329e
MH
174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
1da177e4
LT
177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
f630cf0d 190 set_bit(HCI_RESET, &hdev->flags);
a9de9248 191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
192}
193
/* Issue the initialization command sequence for a BR/EDR controller.
 * Commands are queued in order; their completions drive the init state
 * machine via hci_req_complete().
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers do packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys on the controller. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
245
e61ef499
AE
246static void amp_init(struct hci_dev *hdev)
247{
2455a3ea
AE
248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
e61ef499
AE
250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255}
256
/* Request callback for device bring-up: first flush any driver-supplied
 * vendor commands to the command queue, then run the type-specific
 * initialization sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	/* Drain driver_init before the standard init commands so vendor
	 * setup runs first. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
290
6ed58ec5
VT
291static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292{
293 BT_DBG("%s", hdev->name);
294
295 /* Read LE buffer size */
296 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
297}
298
1da177e4
LT
299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
a9de9248 306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
a9de9248 316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
e4e8e37c 325 /* Encryption */
a9de9248 326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
327}
328
e4e8e37c
MH
329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
a418b893 333 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
8e87d142 339/* Get HCI device by index.
1da177e4
LT
340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
8035ded4 343 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
8035ded4 351 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
1da177e4
LT
360
361/* ---- Inquiry support ---- */
ff9ef578 362
30dc78e1
JH
363bool hci_discovery_active(struct hci_dev *hdev)
364{
365 struct discovery_state *discov = &hdev->discovery;
366
6fbe195d 367 switch (discov->state) {
343f935b 368 case DISCOVERY_FINDING:
6fbe195d 369 case DISCOVERY_RESOLVING:
30dc78e1
JH
370 return true;
371
6fbe195d
AG
372 default:
373 return false;
374 }
30dc78e1
JH
375}
376
ff9ef578
JH
/* Move the discovery state machine to @state, emitting mgmt
 * "discovering" events on the transitions user space cares about.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op transitions are ignored. */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually began,
		 * so don't report it as having stopped. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
402
1da177e4
LT
403static void inquiry_cache_flush(struct hci_dev *hdev)
404{
30883512 405 struct discovery_state *cache = &hdev->discovery;
b57c1a56 406 struct inquiry_entry *p, *n;
1da177e4 407
561aafbc
JH
408 list_for_each_entry_safe(p, n, &cache->all, all) {
409 list_del(&p->all);
b57c1a56 410 kfree(p);
1da177e4 411 }
561aafbc
JH
412
413 INIT_LIST_HEAD(&cache->unknown);
414 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
415}
416
417struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
418{
30883512 419 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
420 struct inquiry_entry *e;
421
422 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423
561aafbc
JH
424 list_for_each_entry(e, &cache->all, all) {
425 if (!bacmp(&e->data.bdaddr, bdaddr))
426 return e;
427 }
428
429 return NULL;
430}
431
432struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 433 bdaddr_t *bdaddr)
561aafbc 434{
30883512 435 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
436 struct inquiry_entry *e;
437
438 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
439
440 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 441 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
442 return e;
443 }
444
445 return NULL;
1da177e4
LT
446}
447
30dc78e1 448struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
449 bdaddr_t *bdaddr,
450 int state)
30dc78e1
JH
451{
452 struct discovery_state *cache = &hdev->discovery;
453 struct inquiry_entry *e;
454
455 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
456
457 list_for_each_entry(e, &cache->resolve, list) {
458 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
459 return e;
460 if (!bacmp(&e->data.bdaddr, bdaddr))
461 return e;
462 }
463
464 return NULL;
465}
466
/* Re-insert @ie into the resolve list, keeping it ordered so that
 * stronger RSSI (smaller |rssi|) entries are resolved first; entries
 * whose name resolution is already pending keep their position.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
					struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the insertion point: stop before the first non-pending
	 * entry whose signal is weaker than or equal to ours. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
485
/* Add or refresh an inquiry-cache entry for @data.
 *
 * @name_known: caller already knows the remote name
 * @ssp:        out param; set to true when the remote supports SSP
 *
 * Returns true if the entry's name is (now) known, false if name
 * resolution is still needed or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
				bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* A previously cached SSP indication also counts. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed on an entry awaiting resolution: update it
		 * and re-sort the resolve list accordingly. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN and drop from the unknown list, unless
	 * resolution is already in flight (NAME_PENDING). */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
541
542static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
543{
30883512 544 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
545 struct inquiry_info *info = (struct inquiry_info *) buf;
546 struct inquiry_entry *e;
547 int copied = 0;
548
561aafbc 549 list_for_each_entry(e, &cache->all, all) {
1da177e4 550 struct inquiry_data *data = &e->data;
b57c1a56
JH
551
552 if (copied >= num)
553 break;
554
1da177e4
LT
555 bacpy(&info->bdaddr, &data->bdaddr);
556 info->pscan_rep_mode = data->pscan_rep_mode;
557 info->pscan_period_mode = data->pscan_period_mode;
558 info->pscan_mode = data->pscan_mode;
559 memcpy(info->dev_class, data->dev_class, 3);
560 info->clock_offset = data->clock_offset;
b57c1a56 561
1da177e4 562 info++;
b57c1a56 563 copied++;
1da177e4
LT
564 }
565
566 BT_DBG("cache %p, copied %d", cache, copied);
567 return copied;
568}
569
570static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
571{
572 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
573 struct hci_cp_inquiry cp;
574
575 BT_DBG("%s", hdev->name);
576
577 if (test_bit(HCI_INQUIRY, &hdev->flags))
578 return;
579
580 /* Start Inquiry */
581 memcpy(&cp.lap, &ir->lap, 3);
582 cp.length = ir->length;
583 cp.num_rsp = ir->num_rsp;
a9de9248 584 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
585}
586
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry, then copy
 * the cached results back to user space after the request struct.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only run a new inquiry when the cache is stale/empty or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units per the HCI spec; ~2s per unit. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the results
	 * immediately after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
652
653/* ---- HCI ioctl helpers ---- */
654
/* Bring up HCI device @dev: open the transport, run the init command
 * sequence (unless raw), and announce the device.  On init failure the
 * transport is torn down again.  Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to open. */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE-capable hosts get an additional LE init pass. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt announces power state itself. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
746
/* Tear down an HCI device: cancel pending work, flush queues and
 * connections, optionally reset the controller, and close the
 * transport.  Safe to call on an already-down device (returns 0).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Auto-off power-downs are not reported to mgmt. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
834
835int hci_dev_close(__u16 dev)
836{
837 struct hci_dev *hdev;
838 int err;
839
70f23020
AE
840 hdev = hci_dev_get(dev);
841 if (!hdev)
1da177e4 842 return -ENODEV;
8ee56540
MH
843
844 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
845 cancel_delayed_work(&hdev->power_off);
846
1da177e4 847 err = hci_dev_do_close(hdev);
8ee56540 848
1da177e4
LT
849 hci_dev_put(hdev);
850 return err;
851}
852
/* HCIDEVRESET ioctl handler: flush queues, drop all connections and
 * the inquiry cache, and issue an HCI Reset (unless raw).  Returns 0
 * if the device is not up.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the flow-control credit counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
891
892int hci_dev_reset_stat(__u16 dev)
893{
894 struct hci_dev *hdev;
895 int ret = 0;
896
70f23020
AE
897 hdev = hci_dev_get(dev);
898 if (!hdev)
1da177e4
LT
899 return -ENODEV;
900
901 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
902
903 hci_dev_put(hdev);
904
905 return ret;
906}
907
/* Dispatcher for the device-configuration ioctls (HCISET*).  Copies
 * the hci_dev_req from user space and applies the requested setting,
 * issuing HCI commands where the controller must be reprogrammed.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	/* The remaining settings are host-side only: no HCI command. */
	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs: high 16 bits = MTU, low 16 bits = pkts. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
982
/* HCIGETDEVLIST ioctl handler: copy the id/flags of up to dev_num
 * registered devices back to user space.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation; dev_num drives the kmalloc size below. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device counts as activity: keep it powered. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) users get pairable devices. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1029
/* HCIGETDEVINFO ioctl handler: fill a hci_dev_info snapshot for the
 * requested device and copy it to user space.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: keep it powered. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) users get pairable devices. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the next. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1071
1072/* ---- Interface to HCI drivers ---- */
1073
611b30f7
MH
1074static int hci_rfkill_set_block(void *data, bool blocked)
1075{
1076 struct hci_dev *hdev = data;
1077
1078 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1079
1080 if (!blocked)
1081 return 0;
1082
1083 hci_dev_do_close(hdev);
1084
1085 return 0;
1086}
1087
1088static const struct rfkill_ops hci_rfkill_ops = {
1089 .set_block = hci_rfkill_set_block,
1090};
1091
1da177e4
LT
1092/* Alloc HCI device */
1093struct hci_dev *hci_alloc_dev(void)
1094{
1095 struct hci_dev *hdev;
1096
25ea6db0 1097 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1098 if (!hdev)
1099 return NULL;
1100
0ac7e700 1101 hci_init_sysfs(hdev);
1da177e4
LT
1102 skb_queue_head_init(&hdev->driver_init);
1103
1104 return hdev;
1105}
1106EXPORT_SYMBOL(hci_alloc_dev);
1107
1108/* Free HCI device */
1109void hci_free_dev(struct hci_dev *hdev)
1110{
1111 skb_queue_purge(&hdev->driver_init);
1112
a91f2e39
MH
1113 /* will free via device release */
1114 put_device(&hdev->dev);
1da177e4
LT
1115}
1116EXPORT_SYMBOL(hci_free_dev);
1117
ab81cbf9
JH
/* Work item: power on the device, arm the auto-power-off timer when
 * nobody explicitly requested power, and announce a newly set-up
 * controller to mgmt.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* If this was only an automatic power-on, schedule power-off. */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful bring-up after registration: tell mgmt. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1134
/* Deferred power-off work: simply closes the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
/* Discoverable-timeout work: leave inquiry scan, keeping page scan
 * only, and clear the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
2aeb9a1a
JH
1163int hci_uuids_clear(struct hci_dev *hdev)
1164{
1165 struct list_head *p, *n;
1166
1167 list_for_each_safe(p, n, &hdev->uuids) {
1168 struct bt_uuid *uuid;
1169
1170 uuid = list_entry(p, struct bt_uuid, list);
1171
1172 list_del(p);
1173 kfree(uuid);
1174 }
1175
1176 return 0;
1177}
1178
55ed8ca1
JH
1179int hci_link_keys_clear(struct hci_dev *hdev)
1180{
1181 struct list_head *p, *n;
1182
1183 list_for_each_safe(p, n, &hdev->link_keys) {
1184 struct link_key *key;
1185
1186 key = list_entry(p, struct link_key, list);
1187
1188 list_del(p);
1189 kfree(key);
1190 }
1191
1192 return 0;
1193}
1194
b899efaf
VCG
1195int hci_smp_ltks_clear(struct hci_dev *hdev)
1196{
1197 struct smp_ltk *k, *tmp;
1198
1199 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1200 list_del(&k->list);
1201 kfree(k);
1202 }
1203
1204 return 0;
1205}
1206
55ed8ca1
JH
1207struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1208{
8035ded4 1209 struct link_key *k;
55ed8ca1 1210
8035ded4 1211 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1212 if (bacmp(bdaddr, &k->bdaddr) == 0)
1213 return k;
55ed8ca1
JH
1214
1215 return NULL;
1216}
1217
/* Decide whether a freshly created link key should be stored
 * persistently, based on the key type and the bonding requirements
 * of both sides of @conn.  Returns true when the key should be kept.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
					u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
c9839a11 1254struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1255{
c9839a11 1256 struct smp_ltk *k;
75d262c2 1257
c9839a11
VCG
1258 list_for_each_entry(k, &hdev->long_term_keys, list) {
1259 if (k->ediv != ediv ||
1260 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1261 continue;
1262
c9839a11 1263 return k;
75d262c2
VCG
1264 }
1265
1266 return NULL;
1267}
1268EXPORT_SYMBOL(hci_find_ltk);
1269
c9839a11 1270struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1271 u8 addr_type)
75d262c2 1272{
c9839a11 1273 struct smp_ltk *k;
75d262c2 1274
c9839a11
VCG
1275 list_for_each_entry(k, &hdev->long_term_keys, list)
1276 if (addr_type == k->bdaddr_type &&
1277 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1278 return k;
1279
1280 return NULL;
1281}
c9839a11 1282EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1283
/* Store (or update) the BR/EDR link key for @bdaddr.  When @new_key is
 * set, notify mgmt and record on @conn whether the key must be flushed
 * (i.e. it is not persistent).  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1337
/* Store (or update) an SMP key for @bdaddr.  Only STK/LTK types are
 * accepted; mgmt is notified only for new LTKs.  Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
/* Delete the stored link key for @bdaddr; -ENOENT when none exists. */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
b899efaf
VCG
1391int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1392{
1393 struct smp_ltk *k, *tmp;
1394
1395 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1396 if (bacmp(bdaddr, &k->bdaddr))
1397 continue;
1398
1399 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1400
1401 list_del(&k->list);
1402 kfree(k);
1403 }
1404
1405 return 0;
1406}
1407
/* HCI command timer function */
/* Fires when the controller failed to answer a command in time; resets
 * the command credit so the cmd_work can make progress again.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2763eda6 1418struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1419 bdaddr_t *bdaddr)
2763eda6
SJ
1420{
1421 struct oob_data *data;
1422
1423 list_for_each_entry(data, &hdev->remote_oob_data, list)
1424 if (bacmp(bdaddr, &data->bdaddr) == 0)
1425 return data;
1426
1427 return NULL;
1428}
1429
/* Delete cached remote OOB data for @bdaddr; -ENOENT when none. */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
1446int hci_remote_oob_data_clear(struct hci_dev *hdev)
1447{
1448 struct oob_data *data, *n;
1449
1450 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1451 list_del(&data->list);
1452 kfree(data);
1453 }
1454
1455 return 0;
1456}
1457
1458int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1459 u8 *randomizer)
2763eda6
SJ
1460{
1461 struct oob_data *data;
1462
1463 data = hci_find_remote_oob_data(hdev, bdaddr);
1464
1465 if (!data) {
1466 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1467 if (!data)
1468 return -ENOMEM;
1469
1470 bacpy(&data->bdaddr, bdaddr);
1471 list_add(&data->list, &hdev->remote_oob_data);
1472 }
1473
1474 memcpy(data->hash, hash, sizeof(data->hash));
1475 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1476
1477 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1478
1479 return 0;
1480}
1481
04124681 1482struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1483{
8035ded4 1484 struct bdaddr_list *b;
b2a66aad 1485
8035ded4 1486 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1487 if (bacmp(bdaddr, &b->bdaddr) == 0)
1488 return b;
b2a66aad
AJ
1489
1490 return NULL;
1491}
1492
1493int hci_blacklist_clear(struct hci_dev *hdev)
1494{
1495 struct list_head *p, *n;
1496
1497 list_for_each_safe(p, n, &hdev->blacklist) {
1498 struct bdaddr_list *b;
1499
1500 b = list_entry(p, struct bdaddr_list, list);
1501
1502 list_del(p);
1503 kfree(b);
1504 }
1505
1506 return 0;
1507}
1508
/* Add @bdaddr to the blacklist and notify mgmt.  Returns -EBADF for
 * BDADDR_ANY, -EEXIST for duplicates, -ENOMEM on allocation failure,
 * otherwise the mgmt_device_blocked() result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
/* Remove @bdaddr from the blacklist and notify mgmt.  BDADDR_ANY
 * clears the whole list instead.  Returns -ENOENT when not found,
 * otherwise the mgmt_device_unblocked() result (or 0 for a clear).
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
/* Delayed work: flush the LE advertising cache under the device lock. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
76c8686f
AG
1559int hci_adv_entries_clear(struct hci_dev *hdev)
1560{
1561 struct adv_entry *entry, *tmp;
1562
1563 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1564 list_del(&entry->list);
1565 kfree(entry);
1566 }
1567
1568 BT_DBG("%s adv cache cleared", hdev->name);
1569
1570 return 0;
1571}
1572
1573struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1574{
1575 struct adv_entry *entry;
1576
1577 list_for_each_entry(entry, &hdev->adv_entries, list)
1578 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1579 return entry;
1580
1581 return NULL;
1582}
1583
1584static inline int is_connectable_adv(u8 evt_type)
1585{
1586 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1587 return 1;
1588
1589 return 0;
1590}
1591
/* Cache a connectable advertising report.  Non-connectable events are
 * rejected with -EINVAL; already-known addresses are ignored (0).
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
/* __hci_request callback: send LE Set Scan Parameters built from the
 * le_scan_params passed via @opt.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
/* __hci_request callback: enable LE scanning (enable = 1). */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
/* Synchronously set scan parameters and enable LE scanning, then arm
 * the delayed work that disables the scan after @timeout ms.
 * Returns -EINPROGRESS when a scan is already running, or the
 * __hci_request error.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
				u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
/* Cancel an ongoing LE scan.  If the disable work was still pending we
 * cancelled it, so send the disable command ourselves.  Returns
 * -EALREADY when no scan is active.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
/* Delayed work: disable LE scanning (all-zero command, enable = 0). */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
/* Work item: run the LE scan with the parameters previously stashed in
 * hdev->le_scan_params by hci_le_scan().
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
			param->timeout);
}
/* Kick off an asynchronous LE scan: record the parameters and queue
 * le_scan_work on the long-running system workqueue.  Returns
 * -EINPROGRESS when scan work is already queued or running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
/* Register HCI device */
/* Allocate an index, initialize all per-device state (queues, work
 * items, key/blacklist/OOB lists, timers), create the workqueue and
 * sysfs/rfkill entries, and schedule the initial power-on.  Returns
 * the new device id, or a negative errno on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is non-fatal: continue without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
/* Tear down a registered device: unlink it, close it, notify mgmt,
 * remove rfkill/sysfs, flush pending work, free all stored state and
 * drop the reference taken in hci_register_dev().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
			!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
/* Only propagates the suspend notification; no state is changed here. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
/* Only propagates the resume notification; no state is changed here. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
/* Timestamp an incoming frame and queue it for rx_work processing.
 * Frames for devices that are neither UP nor in INIT are dropped
 * with -ENXIO (the skb is consumed in every case).
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
/* Accumulate @count bytes of a fragmented HCI packet of @type into the
 * per-index reassembly skb, growing the expected length once the
 * packet header has been seen.  Completed frames are handed to
 * hci_recv_frame().  Returns the number of unconsumed bytes, or a
 * negative errno (-EILSEQ for bad type/index, -ENOMEM).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate with the maximum size
		 * for this packet type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the full header is in, learn the payload length */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
/* Feed driver-supplied fragment data of a known packet @type into the
 * reassembler until fully consumed.  Returns leftover byte count (0 on
 * full consumption) or a negative errno from hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		/* Per-type reassembly slot: index is type - 1 */
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
/* Reassembly slot used for type-prefixed byte streams (e.g. UART). */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream where each packet is preceded by a one-byte
 * packet-type indicator.  Returns leftover byte count or a negative
 * errno from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1da177e4
LT
2125/* ---- Interface to upper protocols ---- */
2126
/* Register an upper-protocol callback structure.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
/* Unregister an upper-protocol callback structure.  Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver, after timestamping it and
 * copying it to the monitor (and raw sockets when in promisc mode).
 * Consumes the skb on error; returns the driver's send() result.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
/* Build a command packet (@opcode + @plen bytes of @param) and queue
 * it on cmd_q for cmd_work to transmit.  Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command sent during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command,
 * or NULL when none was sent or its opcode does not match @opcode.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
/* Prepend an ACL header (packed handle/flags plus data length) to an
 * outgoing skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
/* Queue an ACL skb (and its frag_list fragments, if any) on @queue.
 * Continuation fragments get an ACL header with ACL_CONT set, and the
 * whole chain is queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
/* Entry point for outgoing ACL data: tag the skb, prepend the ACL
 * header (ACL_START plus caller-supplied flags), queue it on the
 * channel's data queue and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	/* Handles fragmentation (frag_list) and atomic queueing */
	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2303
2304/* Send SCO data */
0d861d8b 2305void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2306{
2307 struct hci_dev *hdev = conn->hdev;
2308 struct hci_sco_hdr hdr;
2309
2310 BT_DBG("%s len %d", hdev->name, skb->len);
2311
aca3192c 2312 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2313 hdr.dlen = skb->len;
2314
badff6d0
ACM
2315 skb_push(skb, HCI_SCO_HDR_SIZE);
2316 skb_reset_transport_header(skb);
9c70220b 2317 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2318
2319 skb->dev = (void *) hdev;
0d48d939 2320 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2321
1da177e4 2322 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2323 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2324}
2325EXPORT_SYMBOL(hci_send_sco);
2326
2327/* ---- HCI TX task (outgoing data) ---- */
2328
2329/* HCI Connection scheduler */
2330static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2331{
2332 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2333 struct hci_conn *conn = NULL, *c;
1da177e4 2334 int num = 0, min = ~0;
1da177e4 2335
8e87d142 2336 /* We don't have to lock device here. Connections are always
1da177e4 2337 * added and removed with TX task disabled. */
bf4c6325
GP
2338
2339 rcu_read_lock();
2340
2341 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2342 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2343 continue;
769be974
MH
2344
2345 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2346 continue;
2347
1da177e4
LT
2348 num++;
2349
2350 if (c->sent < min) {
2351 min = c->sent;
2352 conn = c;
2353 }
52087a79
LAD
2354
2355 if (hci_conn_num(hdev, type) == num)
2356 break;
1da177e4
LT
2357 }
2358
bf4c6325
GP
2359 rcu_read_unlock();
2360
1da177e4 2361 if (conn) {
6ed58ec5
VT
2362 int cnt, q;
2363
2364 switch (conn->type) {
2365 case ACL_LINK:
2366 cnt = hdev->acl_cnt;
2367 break;
2368 case SCO_LINK:
2369 case ESCO_LINK:
2370 cnt = hdev->sco_cnt;
2371 break;
2372 case LE_LINK:
2373 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2374 break;
2375 default:
2376 cnt = 0;
2377 BT_ERR("Unknown link type");
2378 }
2379
2380 q = cnt / num;
1da177e4
LT
2381 *quote = q ? q : 1;
2382 } else
2383 *quote = 0;
2384
2385 BT_DBG("conn %p quote %d", conn, *quote);
2386 return conn;
2387}
2388
bae1f5d9 2389static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2390{
2391 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2392 struct hci_conn *c;
1da177e4 2393
bae1f5d9 2394 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2395
bf4c6325
GP
2396 rcu_read_lock();
2397
1da177e4 2398 /* Kill stalled connections */
bf4c6325 2399 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2400 if (c->type == type && c->sent) {
2401 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2402 hdev->name, batostr(&c->dst));
2403 hci_acl_disconn(c, 0x13);
2404 }
2405 }
bf4c6325
GP
2406
2407 rcu_read_unlock();
1da177e4
LT
2408}
2409
/* Channel-level scheduler: scan all connections of the given link type
 * and return the channel that should transmit next, together with its
 * packet quota in *quote. Selection is by highest head-of-queue skb
 * priority first, then fewest packets in flight on the owning
 * connection. Returns NULL (quote untouched) when nothing is ready. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
					     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters: lower than
			 * the best seen so far means this channel loses. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Strictly higher priority resets the competition */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break at equal priority: least-loaded
			 * connection wins. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Controller buffer budget for the winning channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE may share the ACL buffer pool when the controller
		 * reports no dedicated LE buffers (le_mtu == 0). */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share among same-priority contenders, at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2488
/* Anti-starvation pass run after a TX round: channels that transmitted
 * this round get their per-round counter reset, while channels that
 * were skipped have their head packet promoted to just below the
 * maximum priority so they cannot be starved indefinitely. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: clear its
			 * counter and leave its priorities alone. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
				skb->priority);
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2538
b71d385a
AE
2539static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2540{
2541 /* Calculate count of blocks used by this packet */
2542 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2543}
2544
63d2bc1b 2545static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2546{
1da177e4
LT
2547 if (!test_bit(HCI_RAW, &hdev->flags)) {
2548 /* ACL tx timeout must be longer than maximum
2549 * link supervision timeout (40.9 seconds) */
63d2bc1b 2550 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
cc48dc0a 2551 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2552 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2553 }
63d2bc1b 2554}
1da177e4 2555
/* ACL TX scheduler for packet-based flow control: drain channels
 * chosen by hci_chan_sent() while controller buffer credits
 * (acl_cnt) remain, then run the priority anti-starvation pass if
 * anything was sent. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head skb when the channel was chosen;
		 * the quota only applies to packets at this priority. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
				skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One buffer credit consumed per packet */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2593
b71d385a
AE
2594static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2595{
63d2bc1b 2596 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2597 struct hci_chan *chan;
2598 struct sk_buff *skb;
2599 int quote;
b71d385a 2600
63d2bc1b 2601 __check_timeout(hdev, cnt);
b71d385a
AE
2602
2603 while (hdev->block_cnt > 0 &&
2604 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2605 u32 priority = (skb_peek(&chan->data_q))->priority;
2606 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2607 int blocks;
2608
2609 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2610 skb->len, skb->priority);
2611
2612 /* Stop if priority has changed */
2613 if (skb->priority < priority)
2614 break;
2615
2616 skb = skb_dequeue(&chan->data_q);
2617
2618 blocks = __get_blocks(hdev, skb);
2619 if (blocks > hdev->block_cnt)
2620 return;
2621
2622 hci_conn_enter_active_mode(chan->conn,
2623 bt_cb(skb)->force_active);
2624
2625 hci_send_frame(skb);
2626 hdev->acl_last_tx = jiffies;
2627
2628 hdev->block_cnt -= blocks;
2629 quote -= blocks;
2630
2631 chan->sent += blocks;
2632 chan->conn->sent += blocks;
2633 }
2634 }
2635
2636 if (cnt != hdev->block_cnt)
2637 hci_prio_recalculate(hdev, ACL_LINK);
2638}
2639
2640static inline void hci_sched_acl(struct hci_dev *hdev)
2641{
2642 BT_DBG("%s", hdev->name);
2643
2644 if (!hci_conn_num(hdev, ACL_LINK))
2645 return;
2646
2647 switch (hdev->flow_ctl_mode) {
2648 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2649 hci_sched_acl_pkt(hdev);
2650 break;
2651
2652 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2653 hci_sched_acl_blk(hdev);
2654 break;
2655 }
2656}
2657
1da177e4
LT
2658/* Schedule SCO */
2659static inline void hci_sched_sco(struct hci_dev *hdev)
2660{
2661 struct hci_conn *conn;
2662 struct sk_buff *skb;
2663 int quote;
2664
2665 BT_DBG("%s", hdev->name);
2666
52087a79
LAD
2667 if (!hci_conn_num(hdev, SCO_LINK))
2668 return;
2669
1da177e4
LT
2670 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2671 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2672 BT_DBG("skb %p len %d", skb, skb->len);
2673 hci_send_frame(skb);
2674
2675 conn->sent++;
2676 if (conn->sent == ~0)
2677 conn->sent = 0;
2678 }
2679 }
2680}
2681
b6a0dc82
MH
2682static inline void hci_sched_esco(struct hci_dev *hdev)
2683{
2684 struct hci_conn *conn;
2685 struct sk_buff *skb;
2686 int quote;
2687
2688 BT_DBG("%s", hdev->name);
2689
52087a79
LAD
2690 if (!hci_conn_num(hdev, ESCO_LINK))
2691 return;
2692
b6a0dc82
MH
2693 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2694 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2695 BT_DBG("skb %p len %d", skb, skb->len);
2696 hci_send_frame(skb);
2697
2698 conn->sent++;
2699 if (conn->sent == ~0)
2700 conn->sent = 0;
2701 }
2702 }
2703}
2704
/* LE TX scheduler: drains LE channels against either the dedicated LE
 * buffer pool or, when the controller reports none (le_pkts == 0),
 * the shared ACL pool — the final write-back below must therefore
 * update the matching counter. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Work on a local credit counter; tmp remembers the starting
	 * value so we can tell whether anything was sent. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Quota only applies to packets at the priority the
		 * channel was selected with. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
				skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool we used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2755
3eff45ea 2756static void hci_tx_work(struct work_struct *work)
1da177e4 2757{
3eff45ea 2758 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2759 struct sk_buff *skb;
2760
6ed58ec5
VT
2761 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2762 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2763
2764 /* Schedule queues and send stuff to HCI driver */
2765
2766 hci_sched_acl(hdev);
2767
2768 hci_sched_sco(hdev);
2769
b6a0dc82
MH
2770 hci_sched_esco(hdev);
2771
6ed58ec5
VT
2772 hci_sched_le(hdev);
2773
1da177e4
LT
2774 /* Send next queued raw (unknown type) packet */
2775 while ((skb = skb_dequeue(&hdev->raw_q)))
2776 hci_send_frame(skb);
1da177e4
LT
2777}
2778
25985edc 2779/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2780
2781/* ACL data packet */
2782static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2783{
2784 struct hci_acl_hdr *hdr = (void *) skb->data;
2785 struct hci_conn *conn;
2786 __u16 handle, flags;
2787
2788 skb_pull(skb, HCI_ACL_HDR_SIZE);
2789
2790 handle = __le16_to_cpu(hdr->handle);
2791 flags = hci_flags(handle);
2792 handle = hci_handle(handle);
2793
2794 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2795
2796 hdev->stat.acl_rx++;
2797
2798 hci_dev_lock(hdev);
2799 conn = hci_conn_hash_lookup_handle(hdev, handle);
2800 hci_dev_unlock(hdev);
8e87d142 2801
1da177e4 2802 if (conn) {
65983fc7 2803 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2804
1da177e4 2805 /* Send to upper protocol */
686ebf28
UF
2806 l2cap_recv_acldata(conn, skb, flags);
2807 return;
1da177e4 2808 } else {
8e87d142 2809 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2810 hdev->name, handle);
2811 }
2812
2813 kfree_skb(skb);
2814}
2815
2816/* SCO data packet */
2817static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2818{
2819 struct hci_sco_hdr *hdr = (void *) skb->data;
2820 struct hci_conn *conn;
2821 __u16 handle;
2822
2823 skb_pull(skb, HCI_SCO_HDR_SIZE);
2824
2825 handle = __le16_to_cpu(hdr->handle);
2826
2827 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2828
2829 hdev->stat.sco_rx++;
2830
2831 hci_dev_lock(hdev);
2832 conn = hci_conn_hash_lookup_handle(hdev, handle);
2833 hci_dev_unlock(hdev);
2834
2835 if (conn) {
1da177e4 2836 /* Send to upper protocol */
686ebf28
UF
2837 sco_recv_scodata(conn, skb);
2838 return;
1da177e4 2839 } else {
8e87d142 2840 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2841 hdev->name, handle);
2842 }
2843
2844 kfree_skb(skb);
2845}
2846
b78752cc 2847static void hci_rx_work(struct work_struct *work)
1da177e4 2848{
b78752cc 2849 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2850 struct sk_buff *skb;
2851
2852 BT_DBG("%s", hdev->name);
2853
1da177e4 2854 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
2855 /* Send copy to monitor */
2856 hci_send_to_monitor(hdev, skb);
2857
1da177e4
LT
2858 if (atomic_read(&hdev->promisc)) {
2859 /* Send copy to the sockets */
470fe1b5 2860 hci_send_to_sock(hdev, skb);
1da177e4
LT
2861 }
2862
2863 if (test_bit(HCI_RAW, &hdev->flags)) {
2864 kfree_skb(skb);
2865 continue;
2866 }
2867
2868 if (test_bit(HCI_INIT, &hdev->flags)) {
2869 /* Don't process data packets in this states. */
0d48d939 2870 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2871 case HCI_ACLDATA_PKT:
2872 case HCI_SCODATA_PKT:
2873 kfree_skb(skb);
2874 continue;
3ff50b79 2875 }
1da177e4
LT
2876 }
2877
2878 /* Process frame */
0d48d939 2879 switch (bt_cb(skb)->pkt_type) {
1da177e4 2880 case HCI_EVENT_PKT:
b78752cc 2881 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2882 hci_event_packet(hdev, skb);
2883 break;
2884
2885 case HCI_ACLDATA_PKT:
2886 BT_DBG("%s ACL data packet", hdev->name);
2887 hci_acldata_packet(hdev, skb);
2888 break;
2889
2890 case HCI_SCODATA_PKT:
2891 BT_DBG("%s SCO data packet", hdev->name);
2892 hci_scodata_packet(hdev, skb);
2893 break;
2894
2895 default:
2896 kfree_skb(skb);
2897 break;
2898 }
2899 }
1da177e4
LT
2900}
2901
c347b765 2902static void hci_cmd_work(struct work_struct *work)
1da177e4 2903{
c347b765 2904 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2905 struct sk_buff *skb;
2906
2907 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2908
1da177e4 2909 /* Send queued commands */
5a08ecce
AE
2910 if (atomic_read(&hdev->cmd_cnt)) {
2911 skb = skb_dequeue(&hdev->cmd_q);
2912 if (!skb)
2913 return;
2914
7585b97a 2915 kfree_skb(hdev->sent_cmd);
1da177e4 2916
70f23020
AE
2917 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2918 if (hdev->sent_cmd) {
1da177e4
LT
2919 atomic_dec(&hdev->cmd_cnt);
2920 hci_send_frame(skb);
7bdb8a5c
SJ
2921 if (test_bit(HCI_RESET, &hdev->flags))
2922 del_timer(&hdev->cmd_timer);
2923 else
2924 mod_timer(&hdev->cmd_timer,
6bd32326 2925 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1da177e4
LT
2926 } else {
2927 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2928 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2929 }
2930 }
2931}
2519a1fc
AG
2932
2933int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2934{
2935 /* General inquiry access code (GIAC) */
2936 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2937 struct hci_cp_inquiry cp;
2938
2939 BT_DBG("%s", hdev->name);
2940
2941 if (test_bit(HCI_INQUIRY, &hdev->flags))
2942 return -EINPROGRESS;
2943
4663262c
JH
2944 inquiry_cache_flush(hdev);
2945
2519a1fc
AG
2946 memset(&cp, 0, sizeof(cp));
2947 memcpy(&cp.lap, lap, sizeof(cp.lap));
2948 cp.length = length;
2949
2950 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2951}
023d5049
AG
2952
2953int hci_cancel_inquiry(struct hci_dev *hdev)
2954{
2955 BT_DBG("%s", hdev->name);
2956
2957 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2958 return -EALREADY;
023d5049
AG
2959
2960 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2961}