Bluetooth: Correct debug print specifier for u16 objects
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
ab81cbf9
JH
36#define AUTO_OFF_TIMEOUT 2000
37
b78752cc 38static void hci_rx_work(struct work_struct *work);
c347b765 39static void hci_cmd_work(struct work_struct *work);
3eff45ea 40static void hci_tx_work(struct work_struct *work);
1da177e4 41
1da177e4
LT
42/* HCI device list */
43LIST_HEAD(hci_dev_list);
44DEFINE_RWLOCK(hci_dev_list_lock);
45
46/* HCI callback list */
47LIST_HEAD(hci_cb_list);
48DEFINE_RWLOCK(hci_cb_list_lock);
49
3df92b31
SL
50/* HCI ID Numbering */
51static DEFINE_IDA(hci_index_ida);
52
1da177e4
LT
53/* ---- HCI notifications ---- */
54
/* Forward an HCI device event (e.g. up/down) to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
59
60/* ---- HCI requests ---- */
61
/* Called when an HCI command completes; wakes up any synchronous waiter
 * parked in __hci_request(). During init, also handles controllers that
 * spontaneously send a reset-complete by re-queueing the last command.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only the spurious-reset case (reset completion that does
		 * not match a sent reset) falls through to the resend. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Hand the result to the waiter in __hci_request(), if any. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
99
/* Abort a pending synchronous request with the given (positive) errno;
 * __hci_request() negates hdev->req_result on the HCI_REQ_CANCELED path.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
/* Execute request and wait for completion.
 *
 * Sends the request via @req and sleeps (interruptibly, up to @timeout
 * jiffies) until hci_req_complete()/hci_req_cancel() flips req_status.
 * Caller must hold the request lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal we return without resetting req_status,
	 * so it stays HCI_REQ_PEND — confirm this is intended upstream. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to an errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
154
6039aa73
GP
/* Serialized wrapper around __hci_request(); fails fast with -ENETDOWN
 * when the device is not up.
 */
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
171
/* Request callback: issue an HCI Reset to the controller. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
180
/* BR/EDR controller init sequence: queue the mandatory information
 * commands followed by optional setup (event filter, CA timeout,
 * stored link key cleanup). Commands complete asynchronously.
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset, unless the driver asked to skip reset on close/init */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all flag) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
232
e61ef499
AE
/* AMP controller init sequence: reset, then read version and AMP info. */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
246
/* Request callback for device bring-up: flush any driver-provided
 * init commands into the command queue first, then run the
 * type-specific (BR/EDR or AMP) init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
280
6ed58ec5
VT
/* Request callback for LE init: query the LE buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
288
1da177e4
LT
/* Request callback: set inquiry/page scan enable from opt's low byte. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
298
/* Request callback: write authentication enable from opt's low byte. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
308
/* Request callback: write encryption mode from opt's low byte. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
318
e4e8e37c
MH
/* Request callback: write the default link policy (little-endian u16). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	/* NOTE(review): %x on a __le16 prints the raw LE value; on BE hosts
	 * the debug output is byte-swapped relative to opt. */
	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
328
8e87d142 329/* Get HCI device by index.
1da177e4
LT
330 * Device is held on return. */
/* Get HCI device by index.
 * Device is held on return (caller must hci_dev_put()); NULL if the
 * index is negative or no registered device matches.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
350
351/* ---- Inquiry support ---- */
ff9ef578 352
30dc78e1
JH
353bool hci_discovery_active(struct hci_dev *hdev)
354{
355 struct discovery_state *discov = &hdev->discovery;
356
6fbe195d 357 switch (discov->state) {
343f935b 358 case DISCOVERY_FINDING:
6fbe195d 359 case DISCOVERY_RESOLVING:
30dc78e1
JH
360 return true;
361
6fbe195d
AG
362 default:
363 return false;
364 }
30dc78e1
JH
365}
366
ff9ef578
JH
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the STOPPED/FINDING edges. No-op if the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Don't report "stopped" if discovery never actually ran */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
392
1da177e4
LT
/* Free every inquiry cache entry and reset the unknown/resolve lists.
 * Caller is expected to hold the device lock (all call sites here do).
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	/* Entries on these lists were just freed via ->all; re-init heads */
	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
406
a8c5fb1a
GP
/* Find an inquiry cache entry by bdaddr on the "all" list, or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
422
/* Find an entry with an unresolved name (on the "unknown" list), or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
438
/* Find an entry on the "resolve" list. With BDADDR_ANY, match on
 * name_state == @state (first pending entry); otherwise match on bdaddr.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
457
/* Re-insert @ie into the "resolve" list, keeping it sorted so that
 * stronger-signal (smaller |rssi|) entries are resolved first; entries
 * already NAME_PENDING keep their position at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
476
/* Insert or refresh an inquiry result in the cache.
 *
 * @name_known: caller already knows the remote name (no resolve needed).
 * @ssp: out-param; set true if either the new data or a cached entry
 *       reports SSP support.
 *
 * Returns true if name resolution is NOT needed for this entry
 * (name known or already pending), false otherwise.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed: keep the resolve list sorted by signal */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN and drop from unknown/resolve lists */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
532
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info; returns the number of entries copied.
 * Must not sleep (see hci_inquiry(), which calls it under the dev lock).
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
560
/* Request callback: start an inquiry with the parameters packed into
 * opt (a pointer to struct hci_inquiry_req). No-op if already inquiring.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
577
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (if the
 * cache is stale/empty or a flush was requested), then copy the cached
 * results back to user space after the request header.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28 s units; 2000 ms gives generous headroom */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
644
645/* ---- HCI ioctl helpers ---- */
646
/* Bring an HCI device up: open the transport, run the init sequence
 * (unless the device is raw), and announce HCI_DEV_UP / mgmt power-on.
 * On init failure the device is fully torn down again.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt is told about power via other paths */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
737
/* Bring an HCI device down: cancel pending work and requests, flush
 * caches/queues, optionally reset the controller, close the transport
 * and clear device state. Teardown ordering is deliberate — do not
 * reorder without understanding the queue/work dependencies.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* If auto-off was pending, hci_dev_close() already told mgmt */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
824
/* HCIDEVDOWN ioctl handler: cancel a pending auto power-off and close
 * the device identified by @dev.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
842
/* HCIDEVRESET ioctl handler: drop queues and cached state, reset the
 * data-credit counters and (for non-raw devices) issue an HCI Reset.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
880
881int hci_dev_reset_stat(__u16 dev)
882{
883 struct hci_dev *hdev;
884 int ret = 0;
885
70f23020
AE
886 hdev = hci_dev_get(dev);
887 if (!hdev)
1da177e4
LT
888 return -ENODEV;
889
890 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
891
892 hci_dev_put(hdev);
893
894 return ret;
895}
896
/* Dispatcher for the HCISET* ioctls: copy the request from user space
 * and either run an HCI request (auth/encrypt/scan/link policy) or set
 * the corresponding hdev field directly.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs mtu in the high u16, pkt count in the low */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
971
/* HCIGETDEVLIST ioctl handler: return up to dev_num (id, flags) pairs
 * for registered devices. Also cancels pending auto power-off and marks
 * non-mgmt devices pairable, matching hci_get_dev_info().
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kmalloc below stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1018
/* HCIGETDEVINFO ioctl handler: fill and copy back a struct hci_dev_info
 * snapshot for the requested device.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1060
1061/* ---- Interface to HCI drivers ---- */
1062
611b30f7
MH
1063static int hci_rfkill_set_block(void *data, bool blocked)
1064{
1065 struct hci_dev *hdev = data;
1066
1067 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1068
1069 if (!blocked)
1070 return 0;
1071
1072 hci_dev_do_close(hdev);
1073
1074 return 0;
1075}
1076
/* rfkill callbacks registered for each HCI device. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1080
ab81cbf9
JH
/* Deferred power-on worker: open the device, arm the auto power-off
 * timer while HCI_AUTO_OFF is set, and finish mgmt setup if pending.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1097
/* Deferred power-off worker (armed by hci_power_on's auto-off timer). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1107
16ab91ab
JH
/* Deferred worker that ends the discoverable period: drop inquiry scan
 * (keep page scan) and clear the discoverable timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1125
2aeb9a1a
JH
1126int hci_uuids_clear(struct hci_dev *hdev)
1127{
1128 struct list_head *p, *n;
1129
1130 list_for_each_safe(p, n, &hdev->uuids) {
1131 struct bt_uuid *uuid;
1132
1133 uuid = list_entry(p, struct bt_uuid, list);
1134
1135 list_del(p);
1136 kfree(uuid);
1137 }
1138
1139 return 0;
1140}
1141
55ed8ca1
JH
1142int hci_link_keys_clear(struct hci_dev *hdev)
1143{
1144 struct list_head *p, *n;
1145
1146 list_for_each_safe(p, n, &hdev->link_keys) {
1147 struct link_key *key;
1148
1149 key = list_entry(p, struct link_key, list);
1150
1151 list_del(p);
1152 kfree(key);
1153 }
1154
1155 return 0;
1156}
1157
b899efaf
VCG
1158int hci_smp_ltks_clear(struct hci_dev *hdev)
1159{
1160 struct smp_ltk *k, *tmp;
1161
1162 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1163 list_del(&k->list);
1164 kfree(k);
1165 }
1166
1167 return 0;
1168}
1169
55ed8ca1
JH
1170struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1171{
8035ded4 1172 struct link_key *k;
55ed8ca1 1173
8035ded4 1174 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1175 if (bacmp(bdaddr, &k->bdaddr) == 0)
1176 return k;
55ed8ca1
JH
1177
1178 return NULL;
1179}
1180
745c0ce3 1181static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1182 u8 key_type, u8 old_key_type)
d25e28ab
JH
1183{
1184 /* Legacy key */
1185 if (key_type < 0x03)
745c0ce3 1186 return true;
d25e28ab
JH
1187
1188 /* Debug keys are insecure so don't store them persistently */
1189 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1190 return false;
d25e28ab
JH
1191
1192 /* Changed combination key and there's no previous one */
1193 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1194 return false;
d25e28ab
JH
1195
1196 /* Security mode 3 case */
1197 if (!conn)
745c0ce3 1198 return true;
d25e28ab
JH
1199
1200 /* Neither local nor remote side had no-bonding as requirement */
1201 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1202 return true;
d25e28ab
JH
1203
1204 /* Local side had dedicated bonding as requirement */
1205 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1206 return true;
d25e28ab
JH
1207
1208 /* Remote side had dedicated bonding as requirement */
1209 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1210 return true;
d25e28ab
JH
1211
1212 /* If none of the above criteria match, then don't store the key
1213 * persistently */
745c0ce3 1214 return false;
d25e28ab
JH
1215}
1216
c9839a11 1217struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1218{
c9839a11 1219 struct smp_ltk *k;
75d262c2 1220
c9839a11
VCG
1221 list_for_each_entry(k, &hdev->long_term_keys, list) {
1222 if (k->ediv != ediv ||
a8c5fb1a 1223 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1224 continue;
1225
c9839a11 1226 return k;
75d262c2
VCG
1227 }
1228
1229 return NULL;
1230}
75d262c2 1231
c9839a11 1232struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1233 u8 addr_type)
75d262c2 1234{
c9839a11 1235 struct smp_ltk *k;
75d262c2 1236
c9839a11
VCG
1237 list_for_each_entry(k, &hdev->long_term_keys, list)
1238 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1239 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1240 return k;
1241
1242 return NULL;
1243}
75d262c2 1244
d25e28ab 1245int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1246 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1247{
1248 struct link_key *key, *old_key;
745c0ce3
VA
1249 u8 old_key_type;
1250 bool persistent;
55ed8ca1
JH
1251
1252 old_key = hci_find_link_key(hdev, bdaddr);
1253 if (old_key) {
1254 old_key_type = old_key->type;
1255 key = old_key;
1256 } else {
12adcf3a 1257 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1258 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1259 if (!key)
1260 return -ENOMEM;
1261 list_add(&key->list, &hdev->link_keys);
1262 }
1263
1264 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1265
d25e28ab
JH
1266 /* Some buggy controller combinations generate a changed
1267 * combination key for legacy pairing even when there's no
1268 * previous key */
1269 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1270 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1271 type = HCI_LK_COMBINATION;
655fe6ec
JH
1272 if (conn)
1273 conn->key_type = type;
1274 }
d25e28ab 1275
55ed8ca1 1276 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1277 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1278 key->pin_len = pin_len;
1279
b6020ba0 1280 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1281 key->type = old_key_type;
4748fed2
JH
1282 else
1283 key->type = type;
1284
4df378a1
JH
1285 if (!new_key)
1286 return 0;
1287
1288 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1289
744cf19e 1290 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1291
6ec5bcad
VA
1292 if (conn)
1293 conn->flush_key = !persistent;
55ed8ca1
JH
1294
1295 return 0;
1296}
1297
c9839a11 1298int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1299 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1300 ediv, u8 rand[8])
75d262c2 1301{
c9839a11 1302 struct smp_ltk *key, *old_key;
75d262c2 1303
c9839a11
VCG
1304 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1305 return 0;
75d262c2 1306
c9839a11
VCG
1307 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1308 if (old_key)
75d262c2 1309 key = old_key;
c9839a11
VCG
1310 else {
1311 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1312 if (!key)
1313 return -ENOMEM;
c9839a11 1314 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1315 }
1316
75d262c2 1317 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1318 key->bdaddr_type = addr_type;
1319 memcpy(key->val, tk, sizeof(key->val));
1320 key->authenticated = authenticated;
1321 key->ediv = ediv;
1322 key->enc_size = enc_size;
1323 key->type = type;
1324 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1325
c9839a11
VCG
1326 if (!new_key)
1327 return 0;
75d262c2 1328
261cc5aa
VCG
1329 if (type & HCI_SMP_LTK)
1330 mgmt_new_ltk(hdev, key, 1);
1331
75d262c2
VCG
1332 return 0;
1333}
1334
55ed8ca1
JH
1335int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1336{
1337 struct link_key *key;
1338
1339 key = hci_find_link_key(hdev, bdaddr);
1340 if (!key)
1341 return -ENOENT;
1342
1343 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1344
1345 list_del(&key->list);
1346 kfree(key);
1347
1348 return 0;
1349}
1350
b899efaf
VCG
1351int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1352{
1353 struct smp_ltk *k, *tmp;
1354
1355 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1356 if (bacmp(bdaddr, &k->bdaddr))
1357 continue;
1358
1359 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1360
1361 list_del(&k->list);
1362 kfree(k);
1363 }
1364
1365 return 0;
1366}
1367
6bd32326 1368/* HCI command timer function */
bda4f23a 1369static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1370{
1371 struct hci_dev *hdev = (void *) arg;
1372
bda4f23a
AE
1373 if (hdev->sent_cmd) {
1374 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1375 u16 opcode = __le16_to_cpu(sent->opcode);
1376
1377 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1378 } else {
1379 BT_ERR("%s command tx timeout", hdev->name);
1380 }
1381
6bd32326 1382 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1383 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1384}
1385
2763eda6 1386struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1387 bdaddr_t *bdaddr)
2763eda6
SJ
1388{
1389 struct oob_data *data;
1390
1391 list_for_each_entry(data, &hdev->remote_oob_data, list)
1392 if (bacmp(bdaddr, &data->bdaddr) == 0)
1393 return data;
1394
1395 return NULL;
1396}
1397
1398int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1399{
1400 struct oob_data *data;
1401
1402 data = hci_find_remote_oob_data(hdev, bdaddr);
1403 if (!data)
1404 return -ENOENT;
1405
1406 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1407
1408 list_del(&data->list);
1409 kfree(data);
1410
1411 return 0;
1412}
1413
1414int hci_remote_oob_data_clear(struct hci_dev *hdev)
1415{
1416 struct oob_data *data, *n;
1417
1418 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1419 list_del(&data->list);
1420 kfree(data);
1421 }
1422
1423 return 0;
1424}
1425
1426int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1427 u8 *randomizer)
2763eda6
SJ
1428{
1429 struct oob_data *data;
1430
1431 data = hci_find_remote_oob_data(hdev, bdaddr);
1432
1433 if (!data) {
1434 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1435 if (!data)
1436 return -ENOMEM;
1437
1438 bacpy(&data->bdaddr, bdaddr);
1439 list_add(&data->list, &hdev->remote_oob_data);
1440 }
1441
1442 memcpy(data->hash, hash, sizeof(data->hash));
1443 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1444
1445 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1446
1447 return 0;
1448}
1449
04124681 1450struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1451{
8035ded4 1452 struct bdaddr_list *b;
b2a66aad 1453
8035ded4 1454 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1455 if (bacmp(bdaddr, &b->bdaddr) == 0)
1456 return b;
b2a66aad
AJ
1457
1458 return NULL;
1459}
1460
1461int hci_blacklist_clear(struct hci_dev *hdev)
1462{
1463 struct list_head *p, *n;
1464
1465 list_for_each_safe(p, n, &hdev->blacklist) {
1466 struct bdaddr_list *b;
1467
1468 b = list_entry(p, struct bdaddr_list, list);
1469
1470 list_del(p);
1471 kfree(b);
1472 }
1473
1474 return 0;
1475}
1476
88c1fe4b 1477int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1478{
1479 struct bdaddr_list *entry;
b2a66aad
AJ
1480
1481 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1482 return -EBADF;
1483
5e762444
AJ
1484 if (hci_blacklist_lookup(hdev, bdaddr))
1485 return -EEXIST;
b2a66aad
AJ
1486
1487 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1488 if (!entry)
1489 return -ENOMEM;
b2a66aad
AJ
1490
1491 bacpy(&entry->bdaddr, bdaddr);
1492
1493 list_add(&entry->list, &hdev->blacklist);
1494
88c1fe4b 1495 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1496}
1497
88c1fe4b 1498int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1499{
1500 struct bdaddr_list *entry;
b2a66aad 1501
1ec918ce 1502 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1503 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1504
1505 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1506 if (!entry)
5e762444 1507 return -ENOENT;
b2a66aad
AJ
1508
1509 list_del(&entry->list);
1510 kfree(entry);
1511
88c1fe4b 1512 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1513}
1514
7ba8b4be
AG
1515static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1516{
1517 struct le_scan_params *param = (struct le_scan_params *) opt;
1518 struct hci_cp_le_set_scan_param cp;
1519
1520 memset(&cp, 0, sizeof(cp));
1521 cp.type = param->type;
1522 cp.interval = cpu_to_le16(param->interval);
1523 cp.window = cpu_to_le16(param->window);
1524
1525 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1526}
1527
1528static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1529{
1530 struct hci_cp_le_set_scan_enable cp;
1531
1532 memset(&cp, 0, sizeof(cp));
1533 cp.enable = 1;
0431a43c 1534 cp.filter_dup = 1;
7ba8b4be
AG
1535
1536 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1537}
1538
1539static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 1540 u16 window, int timeout)
7ba8b4be
AG
1541{
1542 long timeo = msecs_to_jiffies(3000);
1543 struct le_scan_params param;
1544 int err;
1545
1546 BT_DBG("%s", hdev->name);
1547
1548 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1549 return -EINPROGRESS;
1550
1551 param.type = type;
1552 param.interval = interval;
1553 param.window = window;
1554
1555 hci_req_lock(hdev);
1556
1557 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
04124681 1558 timeo);
7ba8b4be
AG
1559 if (!err)
1560 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1561
1562 hci_req_unlock(hdev);
1563
1564 if (err < 0)
1565 return err;
1566
1567 schedule_delayed_work(&hdev->le_scan_disable,
04124681 1568 msecs_to_jiffies(timeout));
7ba8b4be
AG
1569
1570 return 0;
1571}
1572
7dbfac1d
AG
1573int hci_cancel_le_scan(struct hci_dev *hdev)
1574{
1575 BT_DBG("%s", hdev->name);
1576
1577 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1578 return -EALREADY;
1579
1580 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1581 struct hci_cp_le_set_scan_enable cp;
1582
1583 /* Send HCI command to disable LE Scan */
1584 memset(&cp, 0, sizeof(cp));
1585 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1586 }
1587
1588 return 0;
1589}
1590
7ba8b4be
AG
1591static void le_scan_disable_work(struct work_struct *work)
1592{
1593 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1594 le_scan_disable.work);
7ba8b4be
AG
1595 struct hci_cp_le_set_scan_enable cp;
1596
1597 BT_DBG("%s", hdev->name);
1598
1599 memset(&cp, 0, sizeof(cp));
1600
1601 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1602}
1603
28b75a89
AG
1604static void le_scan_work(struct work_struct *work)
1605{
1606 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1607 struct le_scan_params *param = &hdev->le_scan_params;
1608
1609 BT_DBG("%s", hdev->name);
1610
04124681
GP
1611 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1612 param->timeout);
28b75a89
AG
1613}
1614
1615int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 1616 int timeout)
28b75a89
AG
1617{
1618 struct le_scan_params *param = &hdev->le_scan_params;
1619
1620 BT_DBG("%s", hdev->name);
1621
1622 if (work_busy(&hdev->le_scan))
1623 return -EINPROGRESS;
1624
1625 param->type = type;
1626 param->interval = interval;
1627 param->window = window;
1628 param->timeout = timeout;
1629
1630 queue_work(system_long_wq, &hdev->le_scan);
1631
1632 return 0;
1633}
1634
9be0dab7
DH
1635/* Alloc HCI device */
1636struct hci_dev *hci_alloc_dev(void)
1637{
1638 struct hci_dev *hdev;
1639
1640 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1641 if (!hdev)
1642 return NULL;
1643
b1b813d4
DH
1644 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1645 hdev->esco_type = (ESCO_HV1);
1646 hdev->link_mode = (HCI_LM_ACCEPT);
1647 hdev->io_capability = 0x03; /* No Input No Output */
1648
b1b813d4
DH
1649 hdev->sniff_max_interval = 800;
1650 hdev->sniff_min_interval = 80;
1651
1652 mutex_init(&hdev->lock);
1653 mutex_init(&hdev->req_lock);
1654
1655 INIT_LIST_HEAD(&hdev->mgmt_pending);
1656 INIT_LIST_HEAD(&hdev->blacklist);
1657 INIT_LIST_HEAD(&hdev->uuids);
1658 INIT_LIST_HEAD(&hdev->link_keys);
1659 INIT_LIST_HEAD(&hdev->long_term_keys);
1660 INIT_LIST_HEAD(&hdev->remote_oob_data);
b1b813d4
DH
1661
1662 INIT_WORK(&hdev->rx_work, hci_rx_work);
1663 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1664 INIT_WORK(&hdev->tx_work, hci_tx_work);
1665 INIT_WORK(&hdev->power_on, hci_power_on);
1666 INIT_WORK(&hdev->le_scan, le_scan_work);
1667
b1b813d4
DH
1668 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1669 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1670 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1671
9be0dab7 1672 skb_queue_head_init(&hdev->driver_init);
b1b813d4
DH
1673 skb_queue_head_init(&hdev->rx_q);
1674 skb_queue_head_init(&hdev->cmd_q);
1675 skb_queue_head_init(&hdev->raw_q);
1676
1677 init_waitqueue_head(&hdev->req_wait_q);
1678
bda4f23a 1679 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 1680
b1b813d4
DH
1681 hci_init_sysfs(hdev);
1682 discovery_init(hdev);
1683 hci_conn_hash_init(hdev);
9be0dab7
DH
1684
1685 return hdev;
1686}
1687EXPORT_SYMBOL(hci_alloc_dev);
1688
1689/* Free HCI device */
1690void hci_free_dev(struct hci_dev *hdev)
1691{
1692 skb_queue_purge(&hdev->driver_init);
1693
1694 /* will free via device release */
1695 put_device(&hdev->dev);
1696}
1697EXPORT_SYMBOL(hci_free_dev);
1698
1da177e4
LT
1699/* Register HCI device */
1700int hci_register_dev(struct hci_dev *hdev)
1701{
b1b813d4 1702 int id, error;
1da177e4 1703
010666a1 1704 if (!hdev->open || !hdev->close)
1da177e4
LT
1705 return -EINVAL;
1706
08add513
MM
1707 /* Do not allow HCI_AMP devices to register at index 0,
1708 * so the index can be used as the AMP controller ID.
1709 */
3df92b31
SL
1710 switch (hdev->dev_type) {
1711 case HCI_BREDR:
1712 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1713 break;
1714 case HCI_AMP:
1715 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1716 break;
1717 default:
1718 return -EINVAL;
1da177e4 1719 }
8e87d142 1720
3df92b31
SL
1721 if (id < 0)
1722 return id;
1723
1da177e4
LT
1724 sprintf(hdev->name, "hci%d", id);
1725 hdev->id = id;
2d8b3a11
AE
1726
1727 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1728
3df92b31
SL
1729 write_lock(&hci_dev_list_lock);
1730 list_add(&hdev->list, &hci_dev_list);
f20d09d5 1731 write_unlock(&hci_dev_list_lock);
1da177e4 1732
32845eb1 1733 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
a8c5fb1a 1734 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1735 if (!hdev->workqueue) {
1736 error = -ENOMEM;
1737 goto err;
1738 }
f48fd9c8 1739
33ca954d
DH
1740 error = hci_add_sysfs(hdev);
1741 if (error < 0)
1742 goto err_wqueue;
1da177e4 1743
611b30f7 1744 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
1745 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1746 hdev);
611b30f7
MH
1747 if (hdev->rfkill) {
1748 if (rfkill_register(hdev->rfkill) < 0) {
1749 rfkill_destroy(hdev->rfkill);
1750 hdev->rfkill = NULL;
1751 }
1752 }
1753
a8b2d5c2
JH
1754 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1755 set_bit(HCI_SETUP, &hdev->dev_flags);
7f971041 1756 schedule_work(&hdev->power_on);
ab81cbf9 1757
1da177e4 1758 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 1759 hci_dev_hold(hdev);
1da177e4
LT
1760
1761 return id;
f48fd9c8 1762
33ca954d
DH
1763err_wqueue:
1764 destroy_workqueue(hdev->workqueue);
1765err:
3df92b31 1766 ida_simple_remove(&hci_index_ida, hdev->id);
f20d09d5 1767 write_lock(&hci_dev_list_lock);
f48fd9c8 1768 list_del(&hdev->list);
f20d09d5 1769 write_unlock(&hci_dev_list_lock);
f48fd9c8 1770
33ca954d 1771 return error;
1da177e4
LT
1772}
1773EXPORT_SYMBOL(hci_register_dev);
1774
1775/* Unregister HCI device */
59735631 1776void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1777{
3df92b31 1778 int i, id;
ef222013 1779
c13854ce 1780 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1781
94324962
JH
1782 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1783
3df92b31
SL
1784 id = hdev->id;
1785
f20d09d5 1786 write_lock(&hci_dev_list_lock);
1da177e4 1787 list_del(&hdev->list);
f20d09d5 1788 write_unlock(&hci_dev_list_lock);
1da177e4
LT
1789
1790 hci_dev_do_close(hdev);
1791
cd4c5391 1792 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1793 kfree_skb(hdev->reassembly[i]);
1794
ab81cbf9 1795 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 1796 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 1797 hci_dev_lock(hdev);
744cf19e 1798 mgmt_index_removed(hdev);
09fd0de5 1799 hci_dev_unlock(hdev);
56e5cb86 1800 }
ab81cbf9 1801
2e58ef3e
JH
1802 /* mgmt_index_removed should take care of emptying the
1803 * pending list */
1804 BUG_ON(!list_empty(&hdev->mgmt_pending));
1805
1da177e4
LT
1806 hci_notify(hdev, HCI_DEV_UNREG);
1807
611b30f7
MH
1808 if (hdev->rfkill) {
1809 rfkill_unregister(hdev->rfkill);
1810 rfkill_destroy(hdev->rfkill);
1811 }
1812
ce242970 1813 hci_del_sysfs(hdev);
147e2d59 1814
f48fd9c8
MH
1815 destroy_workqueue(hdev->workqueue);
1816
09fd0de5 1817 hci_dev_lock(hdev);
e2e0cacb 1818 hci_blacklist_clear(hdev);
2aeb9a1a 1819 hci_uuids_clear(hdev);
55ed8ca1 1820 hci_link_keys_clear(hdev);
b899efaf 1821 hci_smp_ltks_clear(hdev);
2763eda6 1822 hci_remote_oob_data_clear(hdev);
09fd0de5 1823 hci_dev_unlock(hdev);
e2e0cacb 1824
dc946bd8 1825 hci_dev_put(hdev);
3df92b31
SL
1826
1827 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
1828}
1829EXPORT_SYMBOL(hci_unregister_dev);
1830
1831/* Suspend HCI device */
1832int hci_suspend_dev(struct hci_dev *hdev)
1833{
1834 hci_notify(hdev, HCI_DEV_SUSPEND);
1835 return 0;
1836}
1837EXPORT_SYMBOL(hci_suspend_dev);
1838
1839/* Resume HCI device */
1840int hci_resume_dev(struct hci_dev *hdev)
1841{
1842 hci_notify(hdev, HCI_DEV_RESUME);
1843 return 0;
1844}
1845EXPORT_SYMBOL(hci_resume_dev);
1846
76bca880
MH
1847/* Receive frame from HCI drivers */
1848int hci_recv_frame(struct sk_buff *skb)
1849{
1850 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1851 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 1852 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
1853 kfree_skb(skb);
1854 return -ENXIO;
1855 }
1856
1857 /* Incomming skb */
1858 bt_cb(skb)->incoming = 1;
1859
1860 /* Time stamp */
1861 __net_timestamp(skb);
1862
76bca880 1863 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1864 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1865
76bca880
MH
1866 return 0;
1867}
1868EXPORT_SYMBOL(hci_recv_frame);
1869
33e882a5 1870static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 1871 int count, __u8 index)
33e882a5
SS
1872{
1873 int len = 0;
1874 int hlen = 0;
1875 int remain = count;
1876 struct sk_buff *skb;
1877 struct bt_skb_cb *scb;
1878
1879 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 1880 index >= NUM_REASSEMBLY)
33e882a5
SS
1881 return -EILSEQ;
1882
1883 skb = hdev->reassembly[index];
1884
1885 if (!skb) {
1886 switch (type) {
1887 case HCI_ACLDATA_PKT:
1888 len = HCI_MAX_FRAME_SIZE;
1889 hlen = HCI_ACL_HDR_SIZE;
1890 break;
1891 case HCI_EVENT_PKT:
1892 len = HCI_MAX_EVENT_SIZE;
1893 hlen = HCI_EVENT_HDR_SIZE;
1894 break;
1895 case HCI_SCODATA_PKT:
1896 len = HCI_MAX_SCO_SIZE;
1897 hlen = HCI_SCO_HDR_SIZE;
1898 break;
1899 }
1900
1e429f38 1901 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1902 if (!skb)
1903 return -ENOMEM;
1904
1905 scb = (void *) skb->cb;
1906 scb->expect = hlen;
1907 scb->pkt_type = type;
1908
1909 skb->dev = (void *) hdev;
1910 hdev->reassembly[index] = skb;
1911 }
1912
1913 while (count) {
1914 scb = (void *) skb->cb;
89bb46d0 1915 len = min_t(uint, scb->expect, count);
33e882a5
SS
1916
1917 memcpy(skb_put(skb, len), data, len);
1918
1919 count -= len;
1920 data += len;
1921 scb->expect -= len;
1922 remain = count;
1923
1924 switch (type) {
1925 case HCI_EVENT_PKT:
1926 if (skb->len == HCI_EVENT_HDR_SIZE) {
1927 struct hci_event_hdr *h = hci_event_hdr(skb);
1928 scb->expect = h->plen;
1929
1930 if (skb_tailroom(skb) < scb->expect) {
1931 kfree_skb(skb);
1932 hdev->reassembly[index] = NULL;
1933 return -ENOMEM;
1934 }
1935 }
1936 break;
1937
1938 case HCI_ACLDATA_PKT:
1939 if (skb->len == HCI_ACL_HDR_SIZE) {
1940 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1941 scb->expect = __le16_to_cpu(h->dlen);
1942
1943 if (skb_tailroom(skb) < scb->expect) {
1944 kfree_skb(skb);
1945 hdev->reassembly[index] = NULL;
1946 return -ENOMEM;
1947 }
1948 }
1949 break;
1950
1951 case HCI_SCODATA_PKT:
1952 if (skb->len == HCI_SCO_HDR_SIZE) {
1953 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1954 scb->expect = h->dlen;
1955
1956 if (skb_tailroom(skb) < scb->expect) {
1957 kfree_skb(skb);
1958 hdev->reassembly[index] = NULL;
1959 return -ENOMEM;
1960 }
1961 }
1962 break;
1963 }
1964
1965 if (scb->expect == 0) {
1966 /* Complete frame */
1967
1968 bt_cb(skb)->pkt_type = type;
1969 hci_recv_frame(skb);
1970
1971 hdev->reassembly[index] = NULL;
1972 return remain;
1973 }
1974 }
1975
1976 return remain;
1977}
1978
ef222013
MH
1979int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1980{
f39a3c06
SS
1981 int rem = 0;
1982
ef222013
MH
1983 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1984 return -EILSEQ;
1985
da5f6c37 1986 while (count) {
1e429f38 1987 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1988 if (rem < 0)
1989 return rem;
ef222013 1990
f39a3c06
SS
1991 data += (count - rem);
1992 count = rem;
f81c6224 1993 }
ef222013 1994
f39a3c06 1995 return rem;
ef222013
MH
1996}
1997EXPORT_SYMBOL(hci_recv_fragment);
1998
99811510
SS
1999#define STREAM_REASSEMBLY 0
2000
2001int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2002{
2003 int type;
2004 int rem = 0;
2005
da5f6c37 2006 while (count) {
99811510
SS
2007 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2008
2009 if (!skb) {
2010 struct { char type; } *pkt;
2011
2012 /* Start of the frame */
2013 pkt = data;
2014 type = pkt->type;
2015
2016 data++;
2017 count--;
2018 } else
2019 type = bt_cb(skb)->pkt_type;
2020
1e429f38 2021 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2022 STREAM_REASSEMBLY);
99811510
SS
2023 if (rem < 0)
2024 return rem;
2025
2026 data += (count - rem);
2027 count = rem;
f81c6224 2028 }
99811510
SS
2029
2030 return rem;
2031}
2032EXPORT_SYMBOL(hci_recv_stream_fragment);
2033
1da177e4
LT
2034/* ---- Interface to upper protocols ---- */
2035
1da177e4
LT
2036int hci_register_cb(struct hci_cb *cb)
2037{
2038 BT_DBG("%p name %s", cb, cb->name);
2039
f20d09d5 2040 write_lock(&hci_cb_list_lock);
1da177e4 2041 list_add(&cb->list, &hci_cb_list);
f20d09d5 2042 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2043
2044 return 0;
2045}
2046EXPORT_SYMBOL(hci_register_cb);
2047
2048int hci_unregister_cb(struct hci_cb *cb)
2049{
2050 BT_DBG("%p name %s", cb, cb->name);
2051
f20d09d5 2052 write_lock(&hci_cb_list_lock);
1da177e4 2053 list_del(&cb->list);
f20d09d5 2054 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2055
2056 return 0;
2057}
2058EXPORT_SYMBOL(hci_unregister_cb);
2059
2060static int hci_send_frame(struct sk_buff *skb)
2061{
2062 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2063
2064 if (!hdev) {
2065 kfree_skb(skb);
2066 return -ENODEV;
2067 }
2068
0d48d939 2069 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2070
cd82e61c
MH
2071 /* Time stamp */
2072 __net_timestamp(skb);
1da177e4 2073
cd82e61c
MH
2074 /* Send copy to monitor */
2075 hci_send_to_monitor(hdev, skb);
2076
2077 if (atomic_read(&hdev->promisc)) {
2078 /* Send copy to the sockets */
470fe1b5 2079 hci_send_to_sock(hdev, skb);
1da177e4
LT
2080 }
2081
2082 /* Get rid of skb owner, prior to sending to the driver. */
2083 skb_orphan(skb);
2084
2085 return hdev->send(skb);
2086}
2087
2088/* Send HCI command */
a9de9248 2089int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
2090{
2091 int len = HCI_COMMAND_HDR_SIZE + plen;
2092 struct hci_command_hdr *hdr;
2093 struct sk_buff *skb;
2094
f0e09510 2095 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
1da177e4
LT
2096
2097 skb = bt_skb_alloc(len, GFP_ATOMIC);
2098 if (!skb) {
ef222013 2099 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
2100 return -ENOMEM;
2101 }
2102
2103 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2104 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2105 hdr->plen = plen;
2106
2107 if (plen)
2108 memcpy(skb_put(skb, plen), param, plen);
2109
2110 BT_DBG("skb len %d", skb->len);
2111
0d48d939 2112 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2113 skb->dev = (void *) hdev;
c78ae283 2114
a5040efa
JH
2115 if (test_bit(HCI_INIT, &hdev->flags))
2116 hdev->init_last_cmd = opcode;
2117
1da177e4 2118 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2119 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2120
2121 return 0;
2122}
1da177e4
LT
2123
2124/* Get data from the previously sent command */
a9de9248 2125void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2126{
2127 struct hci_command_hdr *hdr;
2128
2129 if (!hdev->sent_cmd)
2130 return NULL;
2131
2132 hdr = (void *) hdev->sent_cmd->data;
2133
a9de9248 2134 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2135 return NULL;
2136
f0e09510 2137 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2138
2139 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2140}
2141
2142/* Send ACL data */
2143static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2144{
2145 struct hci_acl_hdr *hdr;
2146 int len = skb->len;
2147
badff6d0
ACM
2148 skb_push(skb, HCI_ACL_HDR_SIZE);
2149 skb_reset_transport_header(skb);
9c70220b 2150 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2151 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2152 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2153}
2154
73d80deb 2155static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
a8c5fb1a 2156 struct sk_buff *skb, __u16 flags)
1da177e4
LT
2157{
2158 struct hci_dev *hdev = conn->hdev;
2159 struct sk_buff *list;
2160
087bfd99
GP
2161 skb->len = skb_headlen(skb);
2162 skb->data_len = 0;
2163
2164 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2165 hci_add_acl_hdr(skb, conn->handle, flags);
2166
70f23020
AE
2167 list = skb_shinfo(skb)->frag_list;
2168 if (!list) {
1da177e4
LT
2169 /* Non fragmented */
2170 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2171
73d80deb 2172 skb_queue_tail(queue, skb);
1da177e4
LT
2173 } else {
2174 /* Fragmented */
2175 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2176
2177 skb_shinfo(skb)->frag_list = NULL;
2178
2179 /* Queue all fragments atomically */
af3e6359 2180 spin_lock(&queue->lock);
1da177e4 2181
73d80deb 2182 __skb_queue_tail(queue, skb);
e702112f
AE
2183
2184 flags &= ~ACL_START;
2185 flags |= ACL_CONT;
1da177e4
LT
2186 do {
2187 skb = list; list = list->next;
8e87d142 2188
1da177e4 2189 skb->dev = (void *) hdev;
0d48d939 2190 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2191 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2192
2193 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2194
73d80deb 2195 __skb_queue_tail(queue, skb);
1da177e4
LT
2196 } while (list);
2197
af3e6359 2198 spin_unlock(&queue->lock);
1da177e4 2199 }
73d80deb
LAD
2200}
2201
2202void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2203{
2204 struct hci_conn *conn = chan->conn;
2205 struct hci_dev *hdev = conn->hdev;
2206
f0e09510 2207 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb
LAD
2208
2209 skb->dev = (void *) hdev;
73d80deb
LAD
2210
2211 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2212
3eff45ea 2213 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2214}
1da177e4
LT
2215
2216/* Send SCO data */
0d861d8b 2217void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2218{
2219 struct hci_dev *hdev = conn->hdev;
2220 struct hci_sco_hdr hdr;
2221
2222 BT_DBG("%s len %d", hdev->name, skb->len);
2223
aca3192c 2224 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2225 hdr.dlen = skb->len;
2226
badff6d0
ACM
2227 skb_push(skb, HCI_SCO_HDR_SIZE);
2228 skb_reset_transport_header(skb);
9c70220b 2229 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2230
2231 skb->dev = (void *) hdev;
0d48d939 2232 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2233
1da177e4 2234 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2235 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2236}
1da177e4
LT
2237
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	/* Pick the connection of the requested link type with queued data
	 * and the fewest outstanding (unacked) packets — i.e. the one most
	 * starved for controller buffers. */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffer credits for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the credits; always grant at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2300
6039aa73 2301static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2302{
2303 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2304 struct hci_conn *c;
1da177e4 2305
bae1f5d9 2306 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2307
bf4c6325
GP
2308 rcu_read_lock();
2309
1da177e4 2310 /* Kill stalled connections */
bf4c6325 2311 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2312 if (c->type == type && c->sent) {
2313 BT_ERR("%s killing stalled connection %s",
a8c5fb1a 2314 hdev->name, batostr(&c->dst));
7490c6c2 2315 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2316 }
2317 }
bf4c6325
GP
2318
2319 rcu_read_unlock();
1da177e4
LT
2320}
2321
/* Channel scheduler: among all channels of the given link type, pick the
 * one holding the highest-priority queued skb, breaking ties in favor of
 * the connection with the fewest outstanding packets.  Returns the chosen
 * channel and its fair quota of buffer credits via *quote. */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a strictly higher priority: restart the
			 * fairness bookkeeping at this new level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal-priority channels prefer the least
			 * busy connection. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Available controller buffer credits for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the credits; always grant at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2400
/* Anti-starvation pass run after a scheduling round: promote the head skb
 * of every channel that sent nothing this round to (almost) top priority,
 * and clear the per-round 'sent' counter of channels that did send. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: reset its counter
			 * and leave its priority alone. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2450
/* Number of controller data blocks consumed by one ACL packet's payload
 * (header excluded), rounding up to whole blocks. */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2456
6039aa73 2457static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2458{
1da177e4
LT
2459 if (!test_bit(HCI_RAW, &hdev->flags)) {
2460 /* ACL tx timeout must be longer than maximum
2461 * link supervision timeout (40.9 seconds) */
63d2bc1b 2462 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2463 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2464 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2465 }
63d2bc1b 2466}
1da177e4 2467
6039aa73 2468static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2469{
2470 unsigned int cnt = hdev->acl_cnt;
2471 struct hci_chan *chan;
2472 struct sk_buff *skb;
2473 int quote;
2474
2475 __check_timeout(hdev, cnt);
04837f64 2476
73d80deb 2477 while (hdev->acl_cnt &&
a8c5fb1a 2478 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2479 u32 priority = (skb_peek(&chan->data_q))->priority;
2480 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2481 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2482 skb->len, skb->priority);
73d80deb 2483
ec1cce24
LAD
2484 /* Stop if priority has changed */
2485 if (skb->priority < priority)
2486 break;
2487
2488 skb = skb_dequeue(&chan->data_q);
2489
73d80deb 2490 hci_conn_enter_active_mode(chan->conn,
04124681 2491 bt_cb(skb)->force_active);
04837f64 2492
1da177e4
LT
2493 hci_send_frame(skb);
2494 hdev->acl_last_tx = jiffies;
2495
2496 hdev->acl_cnt--;
73d80deb
LAD
2497 chan->sent++;
2498 chan->conn->sent++;
1da177e4
LT
2499 }
2500 }
02b20f0b
LAD
2501
2502 if (cnt != hdev->acl_cnt)
2503 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2504}
2505
6039aa73 2506static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2507{
63d2bc1b 2508 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2509 struct hci_chan *chan;
2510 struct sk_buff *skb;
2511 int quote;
b71d385a 2512
63d2bc1b 2513 __check_timeout(hdev, cnt);
b71d385a
AE
2514
2515 while (hdev->block_cnt > 0 &&
a8c5fb1a 2516 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
b71d385a
AE
2517 u32 priority = (skb_peek(&chan->data_q))->priority;
2518 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2519 int blocks;
2520
2521 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2522 skb->len, skb->priority);
b71d385a
AE
2523
2524 /* Stop if priority has changed */
2525 if (skb->priority < priority)
2526 break;
2527
2528 skb = skb_dequeue(&chan->data_q);
2529
2530 blocks = __get_blocks(hdev, skb);
2531 if (blocks > hdev->block_cnt)
2532 return;
2533
2534 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2535 bt_cb(skb)->force_active);
b71d385a
AE
2536
2537 hci_send_frame(skb);
2538 hdev->acl_last_tx = jiffies;
2539
2540 hdev->block_cnt -= blocks;
2541 quote -= blocks;
2542
2543 chan->sent += blocks;
2544 chan->conn->sent += blocks;
2545 }
2546 }
2547
2548 if (cnt != hdev->block_cnt)
2549 hci_prio_recalculate(hdev, ACL_LINK);
2550}
2551
6039aa73 2552static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2553{
2554 BT_DBG("%s", hdev->name);
2555
2556 if (!hci_conn_num(hdev, ACL_LINK))
2557 return;
2558
2559 switch (hdev->flow_ctl_mode) {
2560 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2561 hci_sched_acl_pkt(hdev);
2562 break;
2563
2564 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2565 hci_sched_acl_blk(hdev);
2566 break;
2567 }
2568}
2569
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	/* Drain SCO queues while credits remain, one fair quota per
	 * connection as chosen by hci_low_sent(). */
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			/* Wrap the unacked-packet counter at its maximum */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2593
6039aa73 2594static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2595{
2596 struct hci_conn *conn;
2597 struct sk_buff *skb;
2598 int quote;
2599
2600 BT_DBG("%s", hdev->name);
2601
52087a79
LAD
2602 if (!hci_conn_num(hdev, ESCO_LINK))
2603 return;
2604
8fc9ced3
GP
2605 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2606 &quote))) {
b6a0dc82
MH
2607 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2608 BT_DBG("skb %p len %d", skb, skb->len);
2609 hci_send_frame(skb);
2610
2611 conn->sent++;
2612 if (conn->sent == ~0)
2613 conn->sent = 0;
2614 }
2615 }
2616}
2617
6039aa73 2618static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 2619{
73d80deb 2620 struct hci_chan *chan;
6ed58ec5 2621 struct sk_buff *skb;
02b20f0b 2622 int quote, cnt, tmp;
6ed58ec5
VT
2623
2624 BT_DBG("%s", hdev->name);
2625
52087a79
LAD
2626 if (!hci_conn_num(hdev, LE_LINK))
2627 return;
2628
6ed58ec5
VT
2629 if (!test_bit(HCI_RAW, &hdev->flags)) {
2630 /* LE tx timeout must be longer than maximum
2631 * link supervision timeout (40.9 seconds) */
bae1f5d9 2632 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 2633 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 2634 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
2635 }
2636
2637 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 2638 tmp = cnt;
73d80deb 2639 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
2640 u32 priority = (skb_peek(&chan->data_q))->priority;
2641 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2642 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2643 skb->len, skb->priority);
6ed58ec5 2644
ec1cce24
LAD
2645 /* Stop if priority has changed */
2646 if (skb->priority < priority)
2647 break;
2648
2649 skb = skb_dequeue(&chan->data_q);
2650
6ed58ec5
VT
2651 hci_send_frame(skb);
2652 hdev->le_last_tx = jiffies;
2653
2654 cnt--;
73d80deb
LAD
2655 chan->sent++;
2656 chan->conn->sent++;
6ed58ec5
VT
2657 }
2658 }
73d80deb 2659
6ed58ec5
VT
2660 if (hdev->le_pkts)
2661 hdev->le_cnt = cnt;
2662 else
2663 hdev->acl_cnt = cnt;
02b20f0b
LAD
2664
2665 if (cnt != tmp)
2666 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
2667}
2668
3eff45ea 2669static void hci_tx_work(struct work_struct *work)
1da177e4 2670{
3eff45ea 2671 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2672 struct sk_buff *skb;
2673
6ed58ec5 2674 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 2675 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2676
2677 /* Schedule queues and send stuff to HCI driver */
2678
2679 hci_sched_acl(hdev);
2680
2681 hci_sched_sco(hdev);
2682
b6a0dc82
MH
2683 hci_sched_esco(hdev);
2684
6ed58ec5
VT
2685 hci_sched_le(hdev);
2686
1da177e4
LT
2687 /* Send next queued raw (unknown type) packet */
2688 while ((skb = skb_dequeue(&hdev->raw_q)))
2689 hci_send_frame(skb);
1da177e4
LT
2690}
2691
25985edc 2692/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2693
2694/* ACL data packet */
6039aa73 2695static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2696{
2697 struct hci_acl_hdr *hdr = (void *) skb->data;
2698 struct hci_conn *conn;
2699 __u16 handle, flags;
2700
2701 skb_pull(skb, HCI_ACL_HDR_SIZE);
2702
2703 handle = __le16_to_cpu(hdr->handle);
2704 flags = hci_flags(handle);
2705 handle = hci_handle(handle);
2706
f0e09510 2707 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 2708 handle, flags);
1da177e4
LT
2709
2710 hdev->stat.acl_rx++;
2711
2712 hci_dev_lock(hdev);
2713 conn = hci_conn_hash_lookup_handle(hdev, handle);
2714 hci_dev_unlock(hdev);
8e87d142 2715
1da177e4 2716 if (conn) {
65983fc7 2717 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2718
671267bf
JH
2719 hci_dev_lock(hdev);
2720 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2721 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2722 mgmt_device_connected(hdev, &conn->dst, conn->type,
2723 conn->dst_type, 0, NULL, 0,
2724 conn->dev_class);
2725 hci_dev_unlock(hdev);
2726
1da177e4 2727 /* Send to upper protocol */
686ebf28
UF
2728 l2cap_recv_acldata(conn, skb, flags);
2729 return;
1da177e4 2730 } else {
8e87d142 2731 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 2732 hdev->name, handle);
1da177e4
LT
2733 }
2734
2735 kfree_skb(skb);
2736}
2737
2738/* SCO data packet */
6039aa73 2739static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2740{
2741 struct hci_sco_hdr *hdr = (void *) skb->data;
2742 struct hci_conn *conn;
2743 __u16 handle;
2744
2745 skb_pull(skb, HCI_SCO_HDR_SIZE);
2746
2747 handle = __le16_to_cpu(hdr->handle);
2748
f0e09510 2749 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
2750
2751 hdev->stat.sco_rx++;
2752
2753 hci_dev_lock(hdev);
2754 conn = hci_conn_hash_lookup_handle(hdev, handle);
2755 hci_dev_unlock(hdev);
2756
2757 if (conn) {
1da177e4 2758 /* Send to upper protocol */
686ebf28
UF
2759 sco_recv_scodata(conn, skb);
2760 return;
1da177e4 2761 } else {
8e87d142 2762 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2763 hdev->name, handle);
1da177e4
LT
2764 }
2765
2766 kfree_skb(skb);
2767}
2768
b78752cc 2769static void hci_rx_work(struct work_struct *work)
1da177e4 2770{
b78752cc 2771 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2772 struct sk_buff *skb;
2773
2774 BT_DBG("%s", hdev->name);
2775
1da177e4 2776 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
2777 /* Send copy to monitor */
2778 hci_send_to_monitor(hdev, skb);
2779
1da177e4
LT
2780 if (atomic_read(&hdev->promisc)) {
2781 /* Send copy to the sockets */
470fe1b5 2782 hci_send_to_sock(hdev, skb);
1da177e4
LT
2783 }
2784
2785 if (test_bit(HCI_RAW, &hdev->flags)) {
2786 kfree_skb(skb);
2787 continue;
2788 }
2789
2790 if (test_bit(HCI_INIT, &hdev->flags)) {
2791 /* Don't process data packets in this states. */
0d48d939 2792 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2793 case HCI_ACLDATA_PKT:
2794 case HCI_SCODATA_PKT:
2795 kfree_skb(skb);
2796 continue;
3ff50b79 2797 }
1da177e4
LT
2798 }
2799
2800 /* Process frame */
0d48d939 2801 switch (bt_cb(skb)->pkt_type) {
1da177e4 2802 case HCI_EVENT_PKT:
b78752cc 2803 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2804 hci_event_packet(hdev, skb);
2805 break;
2806
2807 case HCI_ACLDATA_PKT:
2808 BT_DBG("%s ACL data packet", hdev->name);
2809 hci_acldata_packet(hdev, skb);
2810 break;
2811
2812 case HCI_SCODATA_PKT:
2813 BT_DBG("%s SCO data packet", hdev->name);
2814 hci_scodata_packet(hdev, skb);
2815 break;
2816
2817 default:
2818 kfree_skb(skb);
2819 break;
2820 }
2821 }
1da177e4
LT
2822}
2823
c347b765 2824static void hci_cmd_work(struct work_struct *work)
1da177e4 2825{
c347b765 2826 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2827 struct sk_buff *skb;
2828
2829 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2830
1da177e4 2831 /* Send queued commands */
5a08ecce
AE
2832 if (atomic_read(&hdev->cmd_cnt)) {
2833 skb = skb_dequeue(&hdev->cmd_q);
2834 if (!skb)
2835 return;
2836
7585b97a 2837 kfree_skb(hdev->sent_cmd);
1da177e4 2838
70f23020
AE
2839 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2840 if (hdev->sent_cmd) {
1da177e4
LT
2841 atomic_dec(&hdev->cmd_cnt);
2842 hci_send_frame(skb);
7bdb8a5c
SJ
2843 if (test_bit(HCI_RESET, &hdev->flags))
2844 del_timer(&hdev->cmd_timer);
2845 else
2846 mod_timer(&hdev->cmd_timer,
5f246e89 2847 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
2848 } else {
2849 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2850 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2851 }
2852 }
2853}
2519a1fc
AG
2854
2855int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2856{
2857 /* General inquiry access code (GIAC) */
2858 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2859 struct hci_cp_inquiry cp;
2860
2861 BT_DBG("%s", hdev->name);
2862
2863 if (test_bit(HCI_INQUIRY, &hdev->flags))
2864 return -EINPROGRESS;
2865
4663262c
JH
2866 inquiry_cache_flush(hdev);
2867
2519a1fc
AG
2868 memset(&cp, 0, sizeof(cp));
2869 memcpy(&cp.lap, lap, sizeof(cp.lap));
2870 cp.length = length;
2871
2872 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2873}
023d5049
AG
2874
2875int hci_cancel_inquiry(struct hci_dev *hdev)
2876{
2877 BT_DBG("%s", hdev->name);
2878
2879 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2880 return -EALREADY;
023d5049
AG
2881
2882 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2883}
31f7956c
AG
2884
2885u8 bdaddr_to_le(u8 bdaddr_type)
2886{
2887 switch (bdaddr_type) {
2888 case BDADDR_LE_PUBLIC:
2889 return ADDR_LE_DEV_PUBLIC;
2890
2891 default:
2892 /* Fallback to LE Random address type */
2893 return ADDR_LE_DEV_RANDOM;
2894 }
2895}