Bluetooth: Only keep controller up after init if powered on
net/bluetooth/hci_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
28#include <linux/jiffies.h>
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
41#include <linux/workqueue.h>
42#include <linux/interrupt.h>
43#include <linux/rfkill.h>
44#include <linux/timer.h>
45#include <linux/crypto.h>
46#include <net/sock.h>
47
48#include <asm/system.h>
49#include <linux/uaccess.h>
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
55#define AUTO_OFF_TIMEOUT 2000
56
57static void hci_rx_work(struct work_struct *work);
58static void hci_cmd_work(struct work_struct *work);
59static void hci_tx_work(struct work_struct *work);
60
61/* HCI device list */
62LIST_HEAD(hci_dev_list);
63DEFINE_RWLOCK(hci_dev_list_lock);
64
65/* HCI callback list */
66LIST_HEAD(hci_cb_list);
67DEFINE_RWLOCK(hci_cb_list_lock);
68
69/* ---- HCI notifications ---- */
70
71static void hci_notify(struct hci_dev *hdev, int event)
72{
73 hci_sock_dev_event(hdev, event);
74}
75
76/* ---- HCI requests ---- */
77
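/* Called from the event processing path when an HCI command completes.
 * During the HCI_INIT phase only the last queued init command is allowed
 * to finish the pending synchronous request; completions of any other
 * command are ignored so that __hci_request() keeps waiting.
 */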
78void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
79{
80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
82	/* If this is the init phase, check whether the completed command
83	 * matches the last init command, and if not just return.
84	 */
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
86 return;
87
88 if (hdev->req_status == HCI_REQ_PEND) {
89 hdev->req_result = result;
90 hdev->req_status = HCI_REQ_DONE;
91 wake_up_interruptible(&hdev->req_wait_q);
92 }
93}
94
95static void hci_req_cancel(struct hci_dev *hdev, int err)
96{
97 BT_DBG("%s err 0x%2.2x", hdev->name, err);
98
99 if (hdev->req_status == HCI_REQ_PEND) {
100 hdev->req_result = err;
101 hdev->req_status = HCI_REQ_CANCELED;
102 wake_up_interruptible(&hdev->req_wait_q);
103 }
104}
105
106/* Execute request and wait for completion. */
107static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
108 unsigned long opt, __u32 timeout)
109{
110 DECLARE_WAITQUEUE(wait, current);
111 int err = 0;
112
113 BT_DBG("%s start", hdev->name);
114
115 hdev->req_status = HCI_REQ_PEND;
116
117 add_wait_queue(&hdev->req_wait_q, &wait);
118 set_current_state(TASK_INTERRUPTIBLE);
119
120 req(hdev, opt);
121 schedule_timeout(timeout);
122
123 remove_wait_queue(&hdev->req_wait_q, &wait);
124
125 if (signal_pending(current))
126 return -EINTR;
127
128 switch (hdev->req_status) {
129 case HCI_REQ_DONE:
130 err = -bt_to_errno(hdev->req_result);
131 break;
132
133 case HCI_REQ_CANCELED:
134 err = -hdev->req_result;
135 break;
136
137 default:
138 err = -ETIMEDOUT;
139 break;
140 }
141
142 hdev->req_status = hdev->req_result = 0;
143
144 BT_DBG("%s end: err %d", hdev->name, err);
145
146 return err;
147}
148
149static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
150 unsigned long opt, __u32 timeout)
151{
152 int ret;
153
154 if (!test_bit(HCI_UP, &hdev->flags))
155 return -ENETDOWN;
156
157 /* Serialize all requests */
158 hci_req_lock(hdev);
159 ret = __hci_request(hdev, req, opt, timeout);
160 hci_req_unlock(hdev);
161
162 return ret;
163}
164
165static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
166{
167 BT_DBG("%s %ld", hdev->name, opt);
168
169 /* Reset device */
170 set_bit(HCI_RESET, &hdev->flags);
171 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
172}
173
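/* Send the BR/EDR controller initialization sequence: an optional reset
 * (skipped when HCI_QUIRK_NO_RESET is set), the mandatory reads of local
 * features, version, buffer size, BD address, class of device, name and
 * voice setting, followed by the optional event filter clear, connection
 * accept timeout and stored link key deletion.
 */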
174static void bredr_init(struct hci_dev *hdev)
175{
176 struct hci_cp_delete_stored_link_key cp;
177 __le16 param;
178 __u8 flt_type;
179
180 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
181
182 /* Mandatory initialization */
183
184 /* Reset */
185 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
188 }
189
190 /* Read Local Supported Features */
191 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
192
193 /* Read Local Version */
194 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
195
196 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
197 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
198
199 /* Read BD Address */
200 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
201
202 /* Read Class of Device */
203 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
204
205 /* Read Local Name */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
207
208 /* Read Voice Setting */
209 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
210
211 /* Optional initialization */
212
213 /* Clear Event Filters */
214 flt_type = HCI_FLT_CLEAR_ALL;
215 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
216
217 /* Connection accept timeout ~20 secs */
218 param = cpu_to_le16(0x7d00);
219 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
220
221 bacpy(&cp.bdaddr, BDADDR_ANY);
222 cp.delete_all = 1;
223 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
224}
225
226static void amp_init(struct hci_dev *hdev)
227{
228 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
229
230 /* Reset */
231 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
232
233 /* Read Local Version */
234 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
235}
236
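/* Initialization request run under HCI_INIT: first move any driver
 * supplied commands queued on hdev->driver_init onto the command queue,
 * then issue the init sequence that matches the device type (BR/EDR or
 * AMP).
 */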
237static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
238{
239 struct sk_buff *skb;
240
241 BT_DBG("%s %ld", hdev->name, opt);
242
243 /* Driver initialization */
244
245 /* Special commands */
246 while ((skb = skb_dequeue(&hdev->driver_init))) {
247 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
248 skb->dev = (void *) hdev;
249
250 skb_queue_tail(&hdev->cmd_q, skb);
251 queue_work(hdev->workqueue, &hdev->cmd_work);
252 }
253 skb_queue_purge(&hdev->driver_init);
254
255 switch (hdev->dev_type) {
256 case HCI_BREDR:
257 bredr_init(hdev);
258 break;
259
260 case HCI_AMP:
261 amp_init(hdev);
262 break;
263
264 default:
265 BT_ERR("Unknown device type %d", hdev->dev_type);
266 break;
267 }
268
269}
270
271static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
272{
273 BT_DBG("%s", hdev->name);
274
275 /* Read LE buffer size */
276 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
277}
278
279static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __u8 scan = opt;
282
283 BT_DBG("%s %x", hdev->name, scan);
284
285 /* Inquiry and Page scans */
286 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
287}
288
289static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 auth = opt;
292
293 BT_DBG("%s %x", hdev->name, auth);
294
295 /* Authentication */
296 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
297}
298
299static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 encrypt = opt;
302
303 BT_DBG("%s %x", hdev->name, encrypt);
304
305 /* Encryption */
306 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
307}
308
309static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __le16 policy = cpu_to_le16(opt);
312
313 BT_DBG("%s %x", hdev->name, policy);
314
315 /* Default link policy */
316 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
317}
318
319/* Get HCI device by index.
320 * Device is held on return. */
321struct hci_dev *hci_dev_get(int index)
322{
323 struct hci_dev *hdev = NULL, *d;
324
325 BT_DBG("%d", index);
326
327 if (index < 0)
328 return NULL;
329
330 read_lock(&hci_dev_list_lock);
331 list_for_each_entry(d, &hci_dev_list, list) {
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339}
340
341/* ---- Inquiry support ---- */
342
343bool hci_discovery_active(struct hci_dev *hdev)
344{
345 struct discovery_state *discov = &hdev->discovery;
346
347 switch (discov->state) {
348 case DISCOVERY_FINDING:
349 case DISCOVERY_RESOLVING:
350 return true;
351
352 default:
353 return false;
354 }
355}
356
357void hci_discovery_set_state(struct hci_dev *hdev, int state)
358{
359 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
360
361 if (hdev->discovery.state == state)
362 return;
363
364 switch (state) {
365 case DISCOVERY_STOPPED:
366 if (hdev->discovery.state != DISCOVERY_STARTING)
367 mgmt_discovering(hdev, 0);
368 hdev->discovery.type = 0;
369 break;
370 case DISCOVERY_STARTING:
371 break;
372 case DISCOVERY_FINDING:
373 mgmt_discovering(hdev, 1);
374 break;
375 case DISCOVERY_RESOLVING:
376 break;
377 case DISCOVERY_STOPPING:
378 break;
379 }
380
381 hdev->discovery.state = state;
382}
383
384static void inquiry_cache_flush(struct hci_dev *hdev)
385{
386 struct discovery_state *cache = &hdev->discovery;
387 struct inquiry_entry *p, *n;
388
389 list_for_each_entry_safe(p, n, &cache->all, all) {
390 list_del(&p->all);
391 kfree(p);
392 }
393
394 INIT_LIST_HEAD(&cache->unknown);
395 INIT_LIST_HEAD(&cache->resolve);
396 cache->state = DISCOVERY_STOPPED;
397}
398
399struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
400{
401 struct discovery_state *cache = &hdev->discovery;
402 struct inquiry_entry *e;
403
404 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
405
406 list_for_each_entry(e, &cache->all, all) {
407 if (!bacmp(&e->data.bdaddr, bdaddr))
408 return e;
409 }
410
411 return NULL;
412}
413
414struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
415 bdaddr_t *bdaddr)
416{
417 struct discovery_state *cache = &hdev->discovery;
418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
422 list_for_each_entry(e, &cache->unknown, list) {
423 if (!bacmp(&e->data.bdaddr, bdaddr))
424 return e;
425 }
426
427 return NULL;
428}
429
430struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
431 bdaddr_t *bdaddr,
432 int state)
433{
434 struct discovery_state *cache = &hdev->discovery;
435 struct inquiry_entry *e;
436
437 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
438
439 list_for_each_entry(e, &cache->resolve, list) {
440 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
441 return e;
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
449void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
450 struct inquiry_entry *ie)
451{
452 struct discovery_state *cache = &hdev->discovery;
453 struct list_head *pos = &cache->resolve;
454 struct inquiry_entry *p;
455
456 list_del(&ie->list);
457
458 list_for_each_entry(p, &cache->resolve, list) {
459 if (p->name_state != NAME_PENDING &&
460 abs(p->data.rssi) >= abs(ie->data.rssi))
461 break;
462 pos = &p->list;
463 }
464
465 list_add(&ie->list, pos);
466}
467
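/* Add or refresh an inquiry cache entry for the given inquiry result.
 * Entries whose remote name is still unknown are also linked on the
 * "unknown" list so name resolution can be handled later. Returns true
 * when the name is already known, false otherwise.
 */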
468bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
469 bool name_known)
470{
471 struct discovery_state *cache = &hdev->discovery;
472 struct inquiry_entry *ie;
473
474 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
475
476 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
477 if (ie) {
478 if (ie->name_state == NAME_NEEDED &&
479 data->rssi != ie->data.rssi) {
480 ie->data.rssi = data->rssi;
481 hci_inquiry_cache_update_resolve(hdev, ie);
482 }
483
484 goto update;
485 }
486
487 /* Entry not in the cache. Add new one. */
488 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
489 if (!ie)
490 return false;
491
492 list_add(&ie->all, &cache->all);
493
494 if (name_known) {
495 ie->name_state = NAME_KNOWN;
496 } else {
497 ie->name_state = NAME_NOT_KNOWN;
498 list_add(&ie->list, &cache->unknown);
499 }
500
501update:
502 if (name_known && ie->name_state != NAME_KNOWN &&
503 ie->name_state != NAME_PENDING) {
504 ie->name_state = NAME_KNOWN;
505 list_del(&ie->list);
506 }
507
508 memcpy(&ie->data, data, sizeof(*data));
509 ie->timestamp = jiffies;
510 cache->timestamp = jiffies;
511
512 if (ie->name_state == NAME_NOT_KNOWN)
513 return false;
514
515 return true;
516}
517
518static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
519{
520 struct discovery_state *cache = &hdev->discovery;
521 struct inquiry_info *info = (struct inquiry_info *) buf;
522 struct inquiry_entry *e;
523 int copied = 0;
524
525 list_for_each_entry(e, &cache->all, all) {
526 struct inquiry_data *data = &e->data;
527
528 if (copied >= num)
529 break;
530
531 bacpy(&info->bdaddr, &data->bdaddr);
532 info->pscan_rep_mode = data->pscan_rep_mode;
533 info->pscan_period_mode = data->pscan_period_mode;
534 info->pscan_mode = data->pscan_mode;
535 memcpy(info->dev_class, data->dev_class, 3);
536 info->clock_offset = data->clock_offset;
537
538 info++;
539 copied++;
540 }
541
542 BT_DBG("cache %p, copied %d", cache, copied);
543 return copied;
544}
545
546static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
547{
548 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
549 struct hci_cp_inquiry cp;
550
551 BT_DBG("%s", hdev->name);
552
553 if (test_bit(HCI_INQUIRY, &hdev->flags))
554 return;
555
556 /* Start Inquiry */
557 memcpy(&cp.lap, &ir->lap, 3);
558 cp.length = ir->length;
559 cp.num_rsp = ir->num_rsp;
560 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
561}
562
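/* Inquiry ioctl helper: flush the inquiry cache if it is stale, empty or
 * a flush was requested, optionally run a fresh inquiry, and then copy
 * the cached results back to user space.
 */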
563int hci_inquiry(void __user *arg)
564{
565 __u8 __user *ptr = arg;
566 struct hci_inquiry_req ir;
567 struct hci_dev *hdev;
568 int err = 0, do_inquiry = 0, max_rsp;
569 long timeo;
570 __u8 *buf;
571
572 if (copy_from_user(&ir, ptr, sizeof(ir)))
573 return -EFAULT;
574
575 hdev = hci_dev_get(ir.dev_id);
576 if (!hdev)
577 return -ENODEV;
578
579 hci_dev_lock(hdev);
580 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
581 inquiry_cache_empty(hdev) ||
582 ir.flags & IREQ_CACHE_FLUSH) {
583 inquiry_cache_flush(hdev);
584 do_inquiry = 1;
585 }
586 hci_dev_unlock(hdev);
587
588 timeo = ir.length * msecs_to_jiffies(2000);
589
590 if (do_inquiry) {
591 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
592 if (err < 0)
593 goto done;
594 }
595
596	/* For an unlimited number of responses we will use a buffer with 255 entries */
597 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
598
599 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
600	 * copy it to user space.
601 */
602 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
603 if (!buf) {
604 err = -ENOMEM;
605 goto done;
606 }
607
608 hci_dev_lock(hdev);
609 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
610 hci_dev_unlock(hdev);
611
612 BT_DBG("num_rsp %d", ir.num_rsp);
613
614 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
615 ptr += sizeof(ir);
616 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
617 ir.num_rsp))
618 err = -EFAULT;
619 } else
620 err = -EFAULT;
621
622 kfree(buf);
623
624done:
625 hci_dev_put(hdev);
626 return err;
627}
628
629/* ---- HCI ioctl helpers ---- */
630
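/* Bring the controller up: open the driver, run the HCI init sequence
 * (unless the device is marked raw) and notify listeners. The powered-on
 * mgmt event is only sent once the HCI_SETUP phase is over, so a
 * controller that is brought up just for initialization is not reported
 * as powered on.
 */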
631int hci_dev_open(__u16 dev)
632{
633 struct hci_dev *hdev;
634 int ret = 0;
635
636 hdev = hci_dev_get(dev);
637 if (!hdev)
638 return -ENODEV;
639
640 BT_DBG("%s %p", hdev->name, hdev);
641
642 hci_req_lock(hdev);
643
644 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
645 ret = -ERFKILL;
646 goto done;
647 }
648
649 if (test_bit(HCI_UP, &hdev->flags)) {
650 ret = -EALREADY;
651 goto done;
652 }
653
654 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
655 set_bit(HCI_RAW, &hdev->flags);
656
657 /* Treat all non BR/EDR controllers as raw devices if
658 enable_hs is not set */
659 if (hdev->dev_type != HCI_BREDR && !enable_hs)
660 set_bit(HCI_RAW, &hdev->flags);
661
662 if (hdev->open(hdev)) {
663 ret = -EIO;
664 goto done;
665 }
666
667 if (!test_bit(HCI_RAW, &hdev->flags)) {
668 atomic_set(&hdev->cmd_cnt, 1);
669 set_bit(HCI_INIT, &hdev->flags);
670 hdev->init_last_cmd = 0;
671
672 ret = __hci_request(hdev, hci_init_req, 0,
673 msecs_to_jiffies(HCI_INIT_TIMEOUT));
674
675 if (lmp_host_le_capable(hdev))
676 ret = __hci_request(hdev, hci_le_init_req, 0,
677 msecs_to_jiffies(HCI_INIT_TIMEOUT));
678
679 clear_bit(HCI_INIT, &hdev->flags);
680 }
681
682 if (!ret) {
683 hci_dev_hold(hdev);
684 set_bit(HCI_UP, &hdev->flags);
685 hci_notify(hdev, HCI_DEV_UP);
686 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
687 hci_dev_lock(hdev);
688 mgmt_powered(hdev, 1);
689 hci_dev_unlock(hdev);
690 }
691 } else {
692 /* Init failed, cleanup */
693 flush_work(&hdev->tx_work);
694 flush_work(&hdev->cmd_work);
695 flush_work(&hdev->rx_work);
696
697 skb_queue_purge(&hdev->cmd_q);
698 skb_queue_purge(&hdev->rx_q);
699
700 if (hdev->flush)
701 hdev->flush(hdev);
702
703 if (hdev->sent_cmd) {
704 kfree_skb(hdev->sent_cmd);
705 hdev->sent_cmd = NULL;
706 }
707
708 hdev->close(hdev);
709 hdev->flags = 0;
710 }
711
712done:
713 hci_req_unlock(hdev);
714 hci_dev_put(hdev);
715 return ret;
716}
717
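/* Tear the controller down: cancel pending work and requests, flush the
 * RX/TX paths, drop all connections and queued frames, optionally reset
 * the controller, close the driver and report the device as powered off
 * to the management interface.
 */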
718static int hci_dev_do_close(struct hci_dev *hdev)
719{
720 BT_DBG("%s %p", hdev->name, hdev);
721
722 cancel_work_sync(&hdev->le_scan);
723
724 hci_req_cancel(hdev, ENODEV);
725 hci_req_lock(hdev);
726
727 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
728 del_timer_sync(&hdev->cmd_timer);
729 hci_req_unlock(hdev);
730 return 0;
731 }
732
733 /* Flush RX and TX works */
734 flush_work(&hdev->tx_work);
735 flush_work(&hdev->rx_work);
736
737 if (hdev->discov_timeout > 0) {
738 cancel_delayed_work(&hdev->discov_off);
739 hdev->discov_timeout = 0;
740 }
741
742 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
743 cancel_delayed_work(&hdev->power_off);
744
745 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
746 cancel_delayed_work(&hdev->service_cache);
747
748 cancel_delayed_work_sync(&hdev->le_scan_disable);
749
750 hci_dev_lock(hdev);
751 inquiry_cache_flush(hdev);
752 hci_conn_hash_flush(hdev);
753 hci_dev_unlock(hdev);
754
755 hci_notify(hdev, HCI_DEV_DOWN);
756
757 if (hdev->flush)
758 hdev->flush(hdev);
759
760 /* Reset device */
761 skb_queue_purge(&hdev->cmd_q);
762 atomic_set(&hdev->cmd_cnt, 1);
763 if (!test_bit(HCI_RAW, &hdev->flags) &&
764 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
765 set_bit(HCI_INIT, &hdev->flags);
766 __hci_request(hdev, hci_reset_req, 0,
767 msecs_to_jiffies(250));
768 clear_bit(HCI_INIT, &hdev->flags);
769 }
770
771 /* flush cmd work */
772 flush_work(&hdev->cmd_work);
773
774 /* Drop queues */
775 skb_queue_purge(&hdev->rx_q);
776 skb_queue_purge(&hdev->cmd_q);
777 skb_queue_purge(&hdev->raw_q);
778
779 /* Drop last sent command */
780 if (hdev->sent_cmd) {
781 del_timer_sync(&hdev->cmd_timer);
782 kfree_skb(hdev->sent_cmd);
783 hdev->sent_cmd = NULL;
784 }
785
786 /* After this point our queues are empty
787 * and no tasks are scheduled. */
788 hdev->close(hdev);
789
790 hci_dev_lock(hdev);
791 mgmt_powered(hdev, 0);
792 hci_dev_unlock(hdev);
793
794 /* Clear flags */
795 hdev->flags = 0;
796
797 hci_req_unlock(hdev);
798
799 hci_dev_put(hdev);
800 return 0;
801}
802
803int hci_dev_close(__u16 dev)
804{
805 struct hci_dev *hdev;
806 int err;
807
808 hdev = hci_dev_get(dev);
809 if (!hdev)
810 return -ENODEV;
811 err = hci_dev_do_close(hdev);
812 hci_dev_put(hdev);
813 return err;
814}
815
816int hci_dev_reset(__u16 dev)
817{
818 struct hci_dev *hdev;
819 int ret = 0;
820
821 hdev = hci_dev_get(dev);
822 if (!hdev)
823 return -ENODEV;
824
825 hci_req_lock(hdev);
826
827 if (!test_bit(HCI_UP, &hdev->flags))
828 goto done;
829
830 /* Drop queues */
831 skb_queue_purge(&hdev->rx_q);
832 skb_queue_purge(&hdev->cmd_q);
833
834 hci_dev_lock(hdev);
835 inquiry_cache_flush(hdev);
836 hci_conn_hash_flush(hdev);
837 hci_dev_unlock(hdev);
838
839 if (hdev->flush)
840 hdev->flush(hdev);
841
842 atomic_set(&hdev->cmd_cnt, 1);
843 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
844
845 if (!test_bit(HCI_RAW, &hdev->flags))
846 ret = __hci_request(hdev, hci_reset_req, 0,
847 msecs_to_jiffies(HCI_INIT_TIMEOUT));
848
849done:
850 hci_req_unlock(hdev);
851 hci_dev_put(hdev);
852 return ret;
853}
854
855int hci_dev_reset_stat(__u16 dev)
856{
857 struct hci_dev *hdev;
858 int ret = 0;
859
860 hdev = hci_dev_get(dev);
861 if (!hdev)
862 return -ENODEV;
863
864 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
865
866 hci_dev_put(hdev);
867
868 return ret;
869}
870
871int hci_dev_cmd(unsigned int cmd, void __user *arg)
872{
873 struct hci_dev *hdev;
874 struct hci_dev_req dr;
875 int err = 0;
876
877 if (copy_from_user(&dr, arg, sizeof(dr)))
878 return -EFAULT;
879
880 hdev = hci_dev_get(dr.dev_id);
881 if (!hdev)
882 return -ENODEV;
883
884 switch (cmd) {
885 case HCISETAUTH:
886 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
887 msecs_to_jiffies(HCI_INIT_TIMEOUT));
888 break;
889
890 case HCISETENCRYPT:
891 if (!lmp_encrypt_capable(hdev)) {
892 err = -EOPNOTSUPP;
893 break;
894 }
895
896 if (!test_bit(HCI_AUTH, &hdev->flags)) {
897 /* Auth must be enabled first */
898 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
899 msecs_to_jiffies(HCI_INIT_TIMEOUT));
900 if (err)
901 break;
902 }
903
904 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
905 msecs_to_jiffies(HCI_INIT_TIMEOUT));
906 break;
907
908 case HCISETSCAN:
909 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
910 msecs_to_jiffies(HCI_INIT_TIMEOUT));
911 break;
912
913 case HCISETLINKPOL:
914 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
915 msecs_to_jiffies(HCI_INIT_TIMEOUT));
916 break;
917
918 case HCISETLINKMODE:
919 hdev->link_mode = ((__u16) dr.dev_opt) &
920 (HCI_LM_MASTER | HCI_LM_ACCEPT);
921 break;
922
923 case HCISETPTYPE:
924 hdev->pkt_type = (__u16) dr.dev_opt;
925 break;
926
927 case HCISETACLMTU:
928 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
929 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
930 break;
931
932 case HCISETSCOMTU:
933 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
934 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
935 break;
936
937 default:
938 err = -EINVAL;
939 break;
940 }
941
942 hci_dev_put(hdev);
943 return err;
944}
945
946int hci_get_dev_list(void __user *arg)
947{
948 struct hci_dev *hdev;
949 struct hci_dev_list_req *dl;
950 struct hci_dev_req *dr;
951 int n = 0, size, err;
952 __u16 dev_num;
953
954 if (get_user(dev_num, (__u16 __user *) arg))
955 return -EFAULT;
956
957 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
958 return -EINVAL;
959
960 size = sizeof(*dl) + dev_num * sizeof(*dr);
961
962 dl = kzalloc(size, GFP_KERNEL);
963 if (!dl)
964 return -ENOMEM;
965
966 dr = dl->dev_req;
967
968 read_lock(&hci_dev_list_lock);
969 list_for_each_entry(hdev, &hci_dev_list, list) {
970 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
971 cancel_delayed_work(&hdev->power_off);
972
973 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
974 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
975
976 (dr + n)->dev_id = hdev->id;
977 (dr + n)->dev_opt = hdev->flags;
978
979 if (++n >= dev_num)
980 break;
981 }
982 read_unlock(&hci_dev_list_lock);
983
984 dl->dev_num = n;
985 size = sizeof(*dl) + n * sizeof(*dr);
986
987 err = copy_to_user(arg, dl, size);
988 kfree(dl);
989
990 return err ? -EFAULT : 0;
991}
992
993int hci_get_dev_info(void __user *arg)
994{
995 struct hci_dev *hdev;
996 struct hci_dev_info di;
997 int err = 0;
998
999 if (copy_from_user(&di, arg, sizeof(di)))
1000 return -EFAULT;
1001
1002 hdev = hci_dev_get(di.dev_id);
1003 if (!hdev)
1004 return -ENODEV;
1005
1006 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1007 cancel_delayed_work_sync(&hdev->power_off);
1008
1009 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1010 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1011
1012 strcpy(di.name, hdev->name);
1013 di.bdaddr = hdev->bdaddr;
1014 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1015 di.flags = hdev->flags;
1016 di.pkt_type = hdev->pkt_type;
1017 di.acl_mtu = hdev->acl_mtu;
1018 di.acl_pkts = hdev->acl_pkts;
1019 di.sco_mtu = hdev->sco_mtu;
1020 di.sco_pkts = hdev->sco_pkts;
1021 di.link_policy = hdev->link_policy;
1022 di.link_mode = hdev->link_mode;
1023
1024 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1025 memcpy(&di.features, &hdev->features, sizeof(di.features));
1026
1027 if (copy_to_user(arg, &di, sizeof(di)))
1028 err = -EFAULT;
1029
1030 hci_dev_put(hdev);
1031
1032 return err;
1033}
1034
1035/* ---- Interface to HCI drivers ---- */
1036
1037static int hci_rfkill_set_block(void *data, bool blocked)
1038{
1039 struct hci_dev *hdev = data;
1040
1041 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1042
1043 if (!blocked)
1044 return 0;
1045
1046 hci_dev_do_close(hdev);
1047
1048 return 0;
1049}
1050
1051static const struct rfkill_ops hci_rfkill_ops = {
1052 .set_block = hci_rfkill_set_block,
1053};
1054
1055/* Alloc HCI device */
1056struct hci_dev *hci_alloc_dev(void)
1057{
1058 struct hci_dev *hdev;
1059
1060 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1061 if (!hdev)
1062 return NULL;
1063
1064 hci_init_sysfs(hdev);
1065 skb_queue_head_init(&hdev->driver_init);
1066
1067 return hdev;
1068}
1069EXPORT_SYMBOL(hci_alloc_dev);
1070
1071/* Free HCI device */
1072void hci_free_dev(struct hci_dev *hdev)
1073{
1074 skb_queue_purge(&hdev->driver_init);
1075
1076 /* will free via device release */
1077 put_device(&hdev->dev);
1078}
1079EXPORT_SYMBOL(hci_free_dev);
1080
1081static void hci_power_on(struct work_struct *work)
1082{
1083 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1084
1085 BT_DBG("%s", hdev->name);
1086
1087 if (hci_dev_open(hdev->id) < 0)
1088 return;
1089
1090 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1091 schedule_delayed_work(&hdev->power_off,
1092 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1093
1094 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1095 mgmt_index_added(hdev);
1096}
1097
1098static void hci_power_off(struct work_struct *work)
1099{
1100 struct hci_dev *hdev = container_of(work, struct hci_dev,
1101 power_off.work);
1102
1103 BT_DBG("%s", hdev->name);
1104
1105 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1106
1107 hci_dev_close(hdev->id);
1108}
1109
1110static void hci_discov_off(struct work_struct *work)
1111{
1112 struct hci_dev *hdev;
1113 u8 scan = SCAN_PAGE;
1114
1115 hdev = container_of(work, struct hci_dev, discov_off.work);
1116
1117 BT_DBG("%s", hdev->name);
1118
1119 hci_dev_lock(hdev);
1120
1121 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1122
1123 hdev->discov_timeout = 0;
1124
1125 hci_dev_unlock(hdev);
1126}
1127
1128int hci_uuids_clear(struct hci_dev *hdev)
1129{
1130 struct list_head *p, *n;
1131
1132 list_for_each_safe(p, n, &hdev->uuids) {
1133 struct bt_uuid *uuid;
1134
1135 uuid = list_entry(p, struct bt_uuid, list);
1136
1137 list_del(p);
1138 kfree(uuid);
1139 }
1140
1141 return 0;
1142}
1143
1144int hci_link_keys_clear(struct hci_dev *hdev)
1145{
1146 struct list_head *p, *n;
1147
1148 list_for_each_safe(p, n, &hdev->link_keys) {
1149 struct link_key *key;
1150
1151 key = list_entry(p, struct link_key, list);
1152
1153 list_del(p);
1154 kfree(key);
1155 }
1156
1157 return 0;
1158}
1159
1160int hci_smp_ltks_clear(struct hci_dev *hdev)
1161{
1162 struct smp_ltk *k, *tmp;
1163
1164 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1165 list_del(&k->list);
1166 kfree(k);
1167 }
1168
1169 return 0;
1170}
1171
1172struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1173{
1174 struct link_key *k;
1175
1176 list_for_each_entry(k, &hdev->link_keys, list)
1177 if (bacmp(bdaddr, &k->bdaddr) == 0)
1178 return k;
1179
1180 return NULL;
1181}
1182
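/* Decide whether a newly created link key should be stored persistently.
 * Legacy keys always are, debug keys never are, and for the remaining
 * types the decision depends on the bonding requirements that the local
 * and remote side used during pairing.
 */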
1183static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1184 u8 key_type, u8 old_key_type)
1185{
1186 /* Legacy key */
1187 if (key_type < 0x03)
1188 return 1;
1189
1190 /* Debug keys are insecure so don't store them persistently */
1191 if (key_type == HCI_LK_DEBUG_COMBINATION)
1192 return 0;
1193
1194 /* Changed combination key and there's no previous one */
1195 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1196 return 0;
1197
1198 /* Security mode 3 case */
1199 if (!conn)
1200 return 1;
1201
1202 /* Neither local nor remote side had no-bonding as requirement */
1203 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1204 return 1;
1205
1206 /* Local side had dedicated bonding as requirement */
1207 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1208 return 1;
1209
1210 /* Remote side had dedicated bonding as requirement */
1211 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1212 return 1;
1213
1214 /* If none of the above criteria match, then don't store the key
1215 * persistently */
1216 return 0;
1217}
1218
1219struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1220{
1221 struct smp_ltk *k;
1222
1223 list_for_each_entry(k, &hdev->long_term_keys, list) {
1224 if (k->ediv != ediv ||
1225 memcmp(rand, k->rand, sizeof(k->rand)))
1226 continue;
1227
1228 return k;
1229 }
1230
1231 return NULL;
1232}
1233EXPORT_SYMBOL(hci_find_ltk);
1234
1235struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1236 u8 addr_type)
1237{
1238 struct smp_ltk *k;
1239
1240 list_for_each_entry(k, &hdev->long_term_keys, list)
1241 if (addr_type == k->bdaddr_type &&
1242 bacmp(bdaddr, &k->bdaddr) == 0)
1243 return k;
1244
1245 return NULL;
1246}
1247EXPORT_SYMBOL(hci_find_ltk_by_addr);
1248
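/* Store (or update) the link key for a remote device. When new_key is
 * set the management interface is notified, and keys that do not qualify
 * as persistent are dropped again right afterwards.
 */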
1249int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1250 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1251{
1252 struct link_key *key, *old_key;
1253 u8 old_key_type, persistent;
1254
1255 old_key = hci_find_link_key(hdev, bdaddr);
1256 if (old_key) {
1257 old_key_type = old_key->type;
1258 key = old_key;
1259 } else {
1260 old_key_type = conn ? conn->key_type : 0xff;
1261 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1262 if (!key)
1263 return -ENOMEM;
1264 list_add(&key->list, &hdev->link_keys);
1265 }
1266
1267 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1268
1269 /* Some buggy controller combinations generate a changed
1270 * combination key for legacy pairing even when there's no
1271 * previous key */
1272 if (type == HCI_LK_CHANGED_COMBINATION &&
1273 (!conn || conn->remote_auth == 0xff) &&
1274 old_key_type == 0xff) {
1275 type = HCI_LK_COMBINATION;
1276 if (conn)
1277 conn->key_type = type;
1278 }
1279
1280 bacpy(&key->bdaddr, bdaddr);
1281 memcpy(key->val, val, 16);
1282 key->pin_len = pin_len;
1283
1284 if (type == HCI_LK_CHANGED_COMBINATION)
1285 key->type = old_key_type;
1286 else
1287 key->type = type;
1288
1289 if (!new_key)
1290 return 0;
1291
1292 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1293
1294 mgmt_new_link_key(hdev, key, persistent);
1295
1296 if (!persistent) {
1297 list_del(&key->list);
1298 kfree(key);
1299 }
1300
1301 return 0;
1302}
1303
1304int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1305 int new_key, u8 authenticated, u8 tk[16],
1306 u8 enc_size, u16 ediv, u8 rand[8])
1307{
1308 struct smp_ltk *key, *old_key;
1309
1310 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1311 return 0;
1312
1313 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1314 if (old_key)
1315 key = old_key;
1316 else {
1317 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1318 if (!key)
1319 return -ENOMEM;
1320 list_add(&key->list, &hdev->long_term_keys);
1321 }
1322
1323 bacpy(&key->bdaddr, bdaddr);
1324 key->bdaddr_type = addr_type;
1325 memcpy(key->val, tk, sizeof(key->val));
1326 key->authenticated = authenticated;
1327 key->ediv = ediv;
1328 key->enc_size = enc_size;
1329 key->type = type;
1330 memcpy(key->rand, rand, sizeof(key->rand));
1331
1332 if (!new_key)
1333 return 0;
1334
1335 if (type & HCI_SMP_LTK)
1336 mgmt_new_ltk(hdev, key, 1);
1337
1338 return 0;
1339}
1340
1341int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1342{
1343 struct link_key *key;
1344
1345 key = hci_find_link_key(hdev, bdaddr);
1346 if (!key)
1347 return -ENOENT;
1348
1349 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1350
1351 list_del(&key->list);
1352 kfree(key);
1353
1354 return 0;
1355}
1356
1357int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1358{
1359 struct smp_ltk *k, *tmp;
1360
1361 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1362 if (bacmp(bdaddr, &k->bdaddr))
1363 continue;
1364
1365 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1366
1367 list_del(&k->list);
1368 kfree(k);
1369 }
1370
1371 return 0;
1372}
1373
1374/* HCI command timer function */
1375static void hci_cmd_timer(unsigned long arg)
1376{
1377 struct hci_dev *hdev = (void *) arg;
1378
1379 BT_ERR("%s command tx timeout", hdev->name);
1380 atomic_set(&hdev->cmd_cnt, 1);
1381 queue_work(hdev->workqueue, &hdev->cmd_work);
1382}
1383
1384struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1385 bdaddr_t *bdaddr)
1386{
1387 struct oob_data *data;
1388
1389 list_for_each_entry(data, &hdev->remote_oob_data, list)
1390 if (bacmp(bdaddr, &data->bdaddr) == 0)
1391 return data;
1392
1393 return NULL;
1394}
1395
1396int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1397{
1398 struct oob_data *data;
1399
1400 data = hci_find_remote_oob_data(hdev, bdaddr);
1401 if (!data)
1402 return -ENOENT;
1403
1404 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1405
1406 list_del(&data->list);
1407 kfree(data);
1408
1409 return 0;
1410}
1411
1412int hci_remote_oob_data_clear(struct hci_dev *hdev)
1413{
1414 struct oob_data *data, *n;
1415
1416 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1417 list_del(&data->list);
1418 kfree(data);
1419 }
1420
1421 return 0;
1422}
1423
1424int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1425 u8 *randomizer)
1426{
1427 struct oob_data *data;
1428
1429 data = hci_find_remote_oob_data(hdev, bdaddr);
1430
1431 if (!data) {
1432 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1433 if (!data)
1434 return -ENOMEM;
1435
1436 bacpy(&data->bdaddr, bdaddr);
1437 list_add(&data->list, &hdev->remote_oob_data);
1438 }
1439
1440 memcpy(data->hash, hash, sizeof(data->hash));
1441 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1442
1443 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1444
1445 return 0;
1446}
1447
1448struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1449 bdaddr_t *bdaddr)
1450{
1451 struct bdaddr_list *b;
1452
1453 list_for_each_entry(b, &hdev->blacklist, list)
1454 if (bacmp(bdaddr, &b->bdaddr) == 0)
1455 return b;
1456
1457 return NULL;
1458}
1459
1460int hci_blacklist_clear(struct hci_dev *hdev)
1461{
1462 struct list_head *p, *n;
1463
1464 list_for_each_safe(p, n, &hdev->blacklist) {
1465 struct bdaddr_list *b;
1466
1467 b = list_entry(p, struct bdaddr_list, list);
1468
1469 list_del(p);
1470 kfree(b);
1471 }
1472
1473 return 0;
1474}
1475
1476int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1477{
1478 struct bdaddr_list *entry;
1479
1480 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1481 return -EBADF;
1482
1483 if (hci_blacklist_lookup(hdev, bdaddr))
1484 return -EEXIST;
1485
1486 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1487 if (!entry)
1488 return -ENOMEM;
1489
1490 bacpy(&entry->bdaddr, bdaddr);
1491
1492 list_add(&entry->list, &hdev->blacklist);
1493
1494 return mgmt_device_blocked(hdev, bdaddr, type);
1495}
1496
1497int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1498{
1499 struct bdaddr_list *entry;
1500
1501 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1502 return hci_blacklist_clear(hdev);
1503
1504 entry = hci_blacklist_lookup(hdev, bdaddr);
1505 if (!entry)
1506 return -ENOENT;
1507
1508 list_del(&entry->list);
1509 kfree(entry);
1510
1511 return mgmt_device_unblocked(hdev, bdaddr, type);
1512}
1513
1514static void hci_clear_adv_cache(struct work_struct *work)
1515{
1516 struct hci_dev *hdev = container_of(work, struct hci_dev,
1517 adv_work.work);
1518
1519 hci_dev_lock(hdev);
1520
1521 hci_adv_entries_clear(hdev);
1522
1523 hci_dev_unlock(hdev);
1524}
1525
1526int hci_adv_entries_clear(struct hci_dev *hdev)
1527{
1528 struct adv_entry *entry, *tmp;
1529
1530 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1531 list_del(&entry->list);
1532 kfree(entry);
1533 }
1534
1535 BT_DBG("%s adv cache cleared", hdev->name);
1536
1537 return 0;
1538}
1539
1540struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1541{
1542 struct adv_entry *entry;
1543
1544 list_for_each_entry(entry, &hdev->adv_entries, list)
1545 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1546 return entry;
1547
1548 return NULL;
1549}
1550
1551static inline int is_connectable_adv(u8 evt_type)
1552{
1553 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1554 return 1;
1555
1556 return 0;
1557}
1558
1559int hci_add_adv_entry(struct hci_dev *hdev,
1560 struct hci_ev_le_advertising_info *ev)
1561{
1562 struct adv_entry *entry;
1563
1564 if (!is_connectable_adv(ev->evt_type))
1565 return -EINVAL;
1566
1567 /* Only new entries should be added to adv_entries. So, if
1568 * bdaddr was found, don't add it. */
1569 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1570 return 0;
1571
1572 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1573 if (!entry)
1574 return -ENOMEM;
1575
1576 bacpy(&entry->bdaddr, &ev->bdaddr);
1577 entry->bdaddr_type = ev->bdaddr_type;
1578
1579 list_add(&entry->list, &hdev->adv_entries);
1580
1581 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1582 batostr(&entry->bdaddr), entry->bdaddr_type);
1583
1584 return 0;
1585}
1586
1587static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1588{
1589 struct le_scan_params *param = (struct le_scan_params *) opt;
1590 struct hci_cp_le_set_scan_param cp;
1591
1592 memset(&cp, 0, sizeof(cp));
1593 cp.type = param->type;
1594 cp.interval = cpu_to_le16(param->interval);
1595 cp.window = cpu_to_le16(param->window);
1596
1597 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1598}
1599
1600static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1601{
1602 struct hci_cp_le_set_scan_enable cp;
1603
1604 memset(&cp, 0, sizeof(cp));
1605 cp.enable = 1;
1606
1607 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1608}
1609
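/* Start an LE scan synchronously: program the scan parameters, enable
 * scanning and schedule the delayed work that will disable the scan
 * again once the requested timeout has elapsed.
 */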
1610static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1611 u16 window, int timeout)
1612{
1613 long timeo = msecs_to_jiffies(3000);
1614 struct le_scan_params param;
1615 int err;
1616
1617 BT_DBG("%s", hdev->name);
1618
1619 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1620 return -EINPROGRESS;
1621
1622 param.type = type;
1623 param.interval = interval;
1624 param.window = window;
1625
1626 hci_req_lock(hdev);
1627
1628 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1629 timeo);
1630 if (!err)
1631 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1632
1633 hci_req_unlock(hdev);
1634
1635 if (err < 0)
1636 return err;
1637
1638 schedule_delayed_work(&hdev->le_scan_disable,
1639 msecs_to_jiffies(timeout));
1640
1641 return 0;
1642}
1643
1644static void le_scan_disable_work(struct work_struct *work)
1645{
1646 struct hci_dev *hdev = container_of(work, struct hci_dev,
1647 le_scan_disable.work);
1648 struct hci_cp_le_set_scan_enable cp;
1649
1650 BT_DBG("%s", hdev->name);
1651
1652 memset(&cp, 0, sizeof(cp));
1653
1654 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1655}
1656
1657static void le_scan_work(struct work_struct *work)
1658{
1659 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1660 struct le_scan_params *param = &hdev->le_scan_params;
1661
1662 BT_DBG("%s", hdev->name);
1663
1664 hci_do_le_scan(hdev, param->type, param->interval,
1665 param->window, param->timeout);
1666}
1667
1668int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1669 int timeout)
1670{
1671 struct le_scan_params *param = &hdev->le_scan_params;
1672
1673 BT_DBG("%s", hdev->name);
1674
1675 if (work_busy(&hdev->le_scan))
1676 return -EINPROGRESS;
1677
1678 param->type = type;
1679 param->interval = interval;
1680 param->window = window;
1681 param->timeout = timeout;
1682
1683 queue_work(system_long_wq, &hdev->le_scan);
1684
1685 return 0;
1686}
1687
1688/* Register HCI device */
1689int hci_register_dev(struct hci_dev *hdev)
1690{
1691 struct list_head *head = &hci_dev_list, *p;
1692 int i, id, error;
1693
1694 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1695
1696 if (!hdev->open || !hdev->close)
1697 return -EINVAL;
1698
1699 /* Do not allow HCI_AMP devices to register at index 0,
1700 * so the index can be used as the AMP controller ID.
1701 */
1702 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1703
1704 write_lock(&hci_dev_list_lock);
1705
1706 /* Find first available device id */
1707 list_for_each(p, &hci_dev_list) {
1708 if (list_entry(p, struct hci_dev, list)->id != id)
1709 break;
1710 head = p; id++;
1711 }
1712
1713 sprintf(hdev->name, "hci%d", id);
1714 hdev->id = id;
1715 list_add_tail(&hdev->list, head);
1716
1717 mutex_init(&hdev->lock);
1718
1719 hdev->flags = 0;
1720 hdev->dev_flags = 0;
1721 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1722 hdev->esco_type = (ESCO_HV1);
1723 hdev->link_mode = (HCI_LM_ACCEPT);
1724 hdev->io_capability = 0x03; /* No Input No Output */
1725
1726 hdev->idle_timeout = 0;
1727 hdev->sniff_max_interval = 800;
1728 hdev->sniff_min_interval = 80;
1729
1730 INIT_WORK(&hdev->rx_work, hci_rx_work);
1731 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1732 INIT_WORK(&hdev->tx_work, hci_tx_work);
1733
1734
1735 skb_queue_head_init(&hdev->rx_q);
1736 skb_queue_head_init(&hdev->cmd_q);
1737 skb_queue_head_init(&hdev->raw_q);
1738
1739 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1740
1741 for (i = 0; i < NUM_REASSEMBLY; i++)
1742 hdev->reassembly[i] = NULL;
1743
1744 init_waitqueue_head(&hdev->req_wait_q);
1745 mutex_init(&hdev->req_lock);
1746
1747 discovery_init(hdev);
1748
1749 hci_conn_hash_init(hdev);
1750
1751 INIT_LIST_HEAD(&hdev->mgmt_pending);
1752
1753 INIT_LIST_HEAD(&hdev->blacklist);
1754
1755 INIT_LIST_HEAD(&hdev->uuids);
1756
1757 INIT_LIST_HEAD(&hdev->link_keys);
1758 INIT_LIST_HEAD(&hdev->long_term_keys);
1759
1760 INIT_LIST_HEAD(&hdev->remote_oob_data);
1761
1762 INIT_LIST_HEAD(&hdev->adv_entries);
1763
1764 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1765 INIT_WORK(&hdev->power_on, hci_power_on);
1766 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1767
1768 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1769
1770 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1771
1772 atomic_set(&hdev->promisc, 0);
1773
1774 INIT_WORK(&hdev->le_scan, le_scan_work);
1775
1776 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1777
1778 write_unlock(&hci_dev_list_lock);
1779
1780 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1781 WQ_MEM_RECLAIM, 1);
1782 if (!hdev->workqueue) {
1783 error = -ENOMEM;
1784 goto err;
1785 }
1786
1787 error = hci_add_sysfs(hdev);
1788 if (error < 0)
1789 goto err_wqueue;
1790
1791 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1792 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1793 if (hdev->rfkill) {
1794 if (rfkill_register(hdev->rfkill) < 0) {
1795 rfkill_destroy(hdev->rfkill);
1796 hdev->rfkill = NULL;
1797 }
1798 }
1799
1800 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1801 set_bit(HCI_SETUP, &hdev->dev_flags);
1802 schedule_work(&hdev->power_on);
1803
1804 hci_notify(hdev, HCI_DEV_REG);
1805 hci_dev_hold(hdev);
1806
1807 return id;
1808
1809err_wqueue:
1810 destroy_workqueue(hdev->workqueue);
1811err:
1812 write_lock(&hci_dev_list_lock);
1813 list_del(&hdev->list);
1814 write_unlock(&hci_dev_list_lock);
1815
1816 return error;
1817}
1818EXPORT_SYMBOL(hci_register_dev);
1819
1820/* Unregister HCI device */
1821void hci_unregister_dev(struct hci_dev *hdev)
1822{
1823 int i;
1824
1825 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1826
1827 write_lock(&hci_dev_list_lock);
1828 list_del(&hdev->list);
1829 write_unlock(&hci_dev_list_lock);
1830
1831 hci_dev_do_close(hdev);
1832
1833 for (i = 0; i < NUM_REASSEMBLY; i++)
1834 kfree_skb(hdev->reassembly[i]);
1835
1836 if (!test_bit(HCI_INIT, &hdev->flags) &&
1837 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1838 hci_dev_lock(hdev);
1839 mgmt_index_removed(hdev);
1840 hci_dev_unlock(hdev);
1841 }
1842
1843 /* mgmt_index_removed should take care of emptying the
1844 * pending list */
1845 BUG_ON(!list_empty(&hdev->mgmt_pending));
1846
1847 hci_notify(hdev, HCI_DEV_UNREG);
1848
1849 if (hdev->rfkill) {
1850 rfkill_unregister(hdev->rfkill);
1851 rfkill_destroy(hdev->rfkill);
1852 }
1853
1854 hci_del_sysfs(hdev);
1855
1856 cancel_delayed_work_sync(&hdev->adv_work);
1857
1858 destroy_workqueue(hdev->workqueue);
1859
1860 hci_dev_lock(hdev);
1861 hci_blacklist_clear(hdev);
1862 hci_uuids_clear(hdev);
1863 hci_link_keys_clear(hdev);
1864 hci_smp_ltks_clear(hdev);
1865 hci_remote_oob_data_clear(hdev);
1866 hci_adv_entries_clear(hdev);
1867 hci_dev_unlock(hdev);
1868
1869 hci_dev_put(hdev);
1870}
1871EXPORT_SYMBOL(hci_unregister_dev);
1872
1873/* Suspend HCI device */
1874int hci_suspend_dev(struct hci_dev *hdev)
1875{
1876 hci_notify(hdev, HCI_DEV_SUSPEND);
1877 return 0;
1878}
1879EXPORT_SYMBOL(hci_suspend_dev);
1880
1881/* Resume HCI device */
1882int hci_resume_dev(struct hci_dev *hdev)
1883{
1884 hci_notify(hdev, HCI_DEV_RESUME);
1885 return 0;
1886}
1887EXPORT_SYMBOL(hci_resume_dev);
1888
1889/* Receive frame from HCI drivers */
1890int hci_recv_frame(struct sk_buff *skb)
1891{
1892 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1893 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1894 && !test_bit(HCI_INIT, &hdev->flags))) {
1895 kfree_skb(skb);
1896 return -ENXIO;
1897 }
1898
1899	/* Incoming skb */
1900 bt_cb(skb)->incoming = 1;
1901
1902 /* Time stamp */
1903 __net_timestamp(skb);
1904
1905 skb_queue_tail(&hdev->rx_q, skb);
1906 queue_work(hdev->workqueue, &hdev->rx_work);
1907
1908 return 0;
1909}
1910EXPORT_SYMBOL(hci_recv_frame);
1911
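/* Reassemble one HCI packet from driver-supplied fragments. A partially
 * received packet is kept in hdev->reassembly[index]; once the expected
 * header and payload bytes have arrived the complete frame is handed to
 * hci_recv_frame(). Returns the number of input bytes left unconsumed,
 * or a negative error.
 */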
1912static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1913 int count, __u8 index)
1914{
1915 int len = 0;
1916 int hlen = 0;
1917 int remain = count;
1918 struct sk_buff *skb;
1919 struct bt_skb_cb *scb;
1920
1921 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1922 index >= NUM_REASSEMBLY)
1923 return -EILSEQ;
1924
1925 skb = hdev->reassembly[index];
1926
1927 if (!skb) {
1928 switch (type) {
1929 case HCI_ACLDATA_PKT:
1930 len = HCI_MAX_FRAME_SIZE;
1931 hlen = HCI_ACL_HDR_SIZE;
1932 break;
1933 case HCI_EVENT_PKT:
1934 len = HCI_MAX_EVENT_SIZE;
1935 hlen = HCI_EVENT_HDR_SIZE;
1936 break;
1937 case HCI_SCODATA_PKT:
1938 len = HCI_MAX_SCO_SIZE;
1939 hlen = HCI_SCO_HDR_SIZE;
1940 break;
1941 }
1942
1943 skb = bt_skb_alloc(len, GFP_ATOMIC);
1944 if (!skb)
1945 return -ENOMEM;
1946
1947 scb = (void *) skb->cb;
1948 scb->expect = hlen;
1949 scb->pkt_type = type;
1950
1951 skb->dev = (void *) hdev;
1952 hdev->reassembly[index] = skb;
1953 }
1954
1955 while (count) {
1956 scb = (void *) skb->cb;
1957 len = min(scb->expect, (__u16)count);
1958
1959 memcpy(skb_put(skb, len), data, len);
1960
1961 count -= len;
1962 data += len;
1963 scb->expect -= len;
1964 remain = count;
1965
1966 switch (type) {
1967 case HCI_EVENT_PKT:
1968 if (skb->len == HCI_EVENT_HDR_SIZE) {
1969 struct hci_event_hdr *h = hci_event_hdr(skb);
1970 scb->expect = h->plen;
1971
1972 if (skb_tailroom(skb) < scb->expect) {
1973 kfree_skb(skb);
1974 hdev->reassembly[index] = NULL;
1975 return -ENOMEM;
1976 }
1977 }
1978 break;
1979
1980 case HCI_ACLDATA_PKT:
1981 if (skb->len == HCI_ACL_HDR_SIZE) {
1982 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1983 scb->expect = __le16_to_cpu(h->dlen);
1984
1985 if (skb_tailroom(skb) < scb->expect) {
1986 kfree_skb(skb);
1987 hdev->reassembly[index] = NULL;
1988 return -ENOMEM;
1989 }
1990 }
1991 break;
1992
1993 case HCI_SCODATA_PKT:
1994 if (skb->len == HCI_SCO_HDR_SIZE) {
1995 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1996 scb->expect = h->dlen;
1997
1998 if (skb_tailroom(skb) < scb->expect) {
1999 kfree_skb(skb);
2000 hdev->reassembly[index] = NULL;
2001 return -ENOMEM;
2002 }
2003 }
2004 break;
2005 }
2006
2007 if (scb->expect == 0) {
2008 /* Complete frame */
2009
2010 bt_cb(skb)->pkt_type = type;
2011 hci_recv_frame(skb);
2012
2013 hdev->reassembly[index] = NULL;
2014 return remain;
2015 }
2016 }
2017
2018 return remain;
2019}
2020
2021int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2022{
2023 int rem = 0;
2024
2025 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2026 return -EILSEQ;
2027
2028 while (count) {
2029 rem = hci_reassembly(hdev, type, data, count, type - 1);
2030 if (rem < 0)
2031 return rem;
2032
2033 data += (count - rem);
2034 count = rem;
2035 }
2036
2037 return rem;
2038}
2039EXPORT_SYMBOL(hci_recv_fragment);
2040
2041#define STREAM_REASSEMBLY 0
2042
2043int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2044{
2045 int type;
2046 int rem = 0;
2047
2048 while (count) {
2049 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2050
2051 if (!skb) {
2052 struct { char type; } *pkt;
2053
2054 /* Start of the frame */
2055 pkt = data;
2056 type = pkt->type;
2057
2058 data++;
2059 count--;
2060 } else
2061 type = bt_cb(skb)->pkt_type;
2062
2063 rem = hci_reassembly(hdev, type, data, count,
2064 STREAM_REASSEMBLY);
2065 if (rem < 0)
2066 return rem;
2067
2068 data += (count - rem);
2069 count = rem;
2070 }
2071
2072 return rem;
2073}
2074EXPORT_SYMBOL(hci_recv_stream_fragment);
2075
2076/* ---- Interface to upper protocols ---- */
2077
2078int hci_register_cb(struct hci_cb *cb)
2079{
2080 BT_DBG("%p name %s", cb, cb->name);
2081
2082 write_lock(&hci_cb_list_lock);
2083 list_add(&cb->list, &hci_cb_list);
2084 write_unlock(&hci_cb_list_lock);
2085
2086 return 0;
2087}
2088EXPORT_SYMBOL(hci_register_cb);
2089
2090int hci_unregister_cb(struct hci_cb *cb)
2091{
2092 BT_DBG("%p name %s", cb, cb->name);
2093
2094 write_lock(&hci_cb_list_lock);
2095 list_del(&cb->list);
2096 write_unlock(&hci_cb_list_lock);
2097
2098 return 0;
2099}
2100EXPORT_SYMBOL(hci_unregister_cb);
2101
2102static int hci_send_frame(struct sk_buff *skb)
2103{
2104 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2105
2106 if (!hdev) {
2107 kfree_skb(skb);
2108 return -ENODEV;
2109 }
2110
2111 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2112
2113 /* Time stamp */
2114 __net_timestamp(skb);
2115
2116 /* Send copy to monitor */
2117 hci_send_to_monitor(hdev, skb);
2118
2119 if (atomic_read(&hdev->promisc)) {
2120 /* Send copy to the sockets */
2121 hci_send_to_sock(hdev, skb);
2122 }
2123
2124 /* Get rid of skb owner, prior to sending to the driver. */
2125 skb_orphan(skb);
2126
2127 return hdev->send(skb);
2128}
2129
2130/* Send HCI command */
2131int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2132{
2133 int len = HCI_COMMAND_HDR_SIZE + plen;
2134 struct hci_command_hdr *hdr;
2135 struct sk_buff *skb;
2136
2137 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2138
2139 skb = bt_skb_alloc(len, GFP_ATOMIC);
2140 if (!skb) {
2141 BT_ERR("%s no memory for command", hdev->name);
2142 return -ENOMEM;
2143 }
2144
2145 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2146 hdr->opcode = cpu_to_le16(opcode);
2147 hdr->plen = plen;
2148
2149 if (plen)
2150 memcpy(skb_put(skb, plen), param, plen);
2151
2152 BT_DBG("skb len %d", skb->len);
2153
2154 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2155 skb->dev = (void *) hdev;
2156
2157 if (test_bit(HCI_INIT, &hdev->flags))
2158 hdev->init_last_cmd = opcode;
2159
2160 skb_queue_tail(&hdev->cmd_q, skb);
2161 queue_work(hdev->workqueue, &hdev->cmd_work);
2162
2163 return 0;
2164}
2165
2166/* Get data from the previously sent command */
2167void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2168{
2169 struct hci_command_hdr *hdr;
2170
2171 if (!hdev->sent_cmd)
2172 return NULL;
2173
2174 hdr = (void *) hdev->sent_cmd->data;
2175
2176 if (hdr->opcode != cpu_to_le16(opcode))
2177 return NULL;
2178
2179 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2180
2181 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2182}
2183
2184/* Send ACL data */
2185static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2186{
2187 struct hci_acl_hdr *hdr;
2188 int len = skb->len;
2189
2190 skb_push(skb, HCI_ACL_HDR_SIZE);
2191 skb_reset_transport_header(skb);
2192 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2193 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2194 hdr->dlen = cpu_to_le16(len);
2195}
2196
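/* Queue an ACL frame for transmission. A fragmented skb (frag_list set)
 * is split up and all fragments are queued atomically, with ACL_START
 * replaced by ACL_CONT in the header of every fragment after the first.
 */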
2197static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2198 struct sk_buff *skb, __u16 flags)
2199{
2200 struct hci_dev *hdev = conn->hdev;
2201 struct sk_buff *list;
2202
2203 list = skb_shinfo(skb)->frag_list;
2204 if (!list) {
2205 /* Non fragmented */
2206 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2207
2208 skb_queue_tail(queue, skb);
2209 } else {
2210 /* Fragmented */
2211 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2212
2213 skb_shinfo(skb)->frag_list = NULL;
2214
2215 /* Queue all fragments atomically */
2216 spin_lock(&queue->lock);
2217
2218 __skb_queue_tail(queue, skb);
2219
2220 flags &= ~ACL_START;
2221 flags |= ACL_CONT;
2222 do {
2223 skb = list; list = list->next;
2224
2225 skb->dev = (void *) hdev;
2226 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2227 hci_add_acl_hdr(skb, conn->handle, flags);
2228
2229 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2230
2231 __skb_queue_tail(queue, skb);
2232 } while (list);
2233
2234 spin_unlock(&queue->lock);
2235 }
2236}
2237
2238void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2239{
2240 struct hci_conn *conn = chan->conn;
2241 struct hci_dev *hdev = conn->hdev;
2242
2243 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2244
2245 skb->dev = (void *) hdev;
2246 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2247 hci_add_acl_hdr(skb, conn->handle, flags);
2248
2249 hci_queue_acl(conn, &chan->data_q, skb, flags);
2250
2251 queue_work(hdev->workqueue, &hdev->tx_work);
2252}
2253EXPORT_SYMBOL(hci_send_acl);
2254
2255/* Send SCO data */
2256void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2257{
2258 struct hci_dev *hdev = conn->hdev;
2259 struct hci_sco_hdr hdr;
2260
2261 BT_DBG("%s len %d", hdev->name, skb->len);
2262
2263 hdr.handle = cpu_to_le16(conn->handle);
2264 hdr.dlen = skb->len;
2265
2266 skb_push(skb, HCI_SCO_HDR_SIZE);
2267 skb_reset_transport_header(skb);
2268 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2269
2270 skb->dev = (void *) hdev;
2271 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2272
2273 skb_queue_tail(&conn->data_q, skb);
2274 queue_work(hdev->workqueue, &hdev->tx_work);
2275}
2276EXPORT_SYMBOL(hci_send_sco);
2277
2278/* ---- HCI TX task (outgoing data) ---- */
2279
2280/* HCI Connection scheduler */
2281static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2282{
2283 struct hci_conn_hash *h = &hdev->conn_hash;
2284 struct hci_conn *conn = NULL, *c;
2285 int num = 0, min = ~0;
2286
2287 /* We don't have to lock device here. Connections are always
2288 * added and removed with TX task disabled. */
2289
2290 rcu_read_lock();
2291
2292 list_for_each_entry_rcu(c, &h->list, list) {
2293 if (c->type != type || skb_queue_empty(&c->data_q))
2294 continue;
2295
2296 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2297 continue;
2298
2299 num++;
2300
2301 if (c->sent < min) {
2302 min = c->sent;
2303 conn = c;
2304 }
2305
2306 if (hci_conn_num(hdev, type) == num)
2307 break;
2308 }
2309
2310 rcu_read_unlock();
2311
2312 if (conn) {
2313 int cnt, q;
2314
2315 switch (conn->type) {
2316 case ACL_LINK:
2317 cnt = hdev->acl_cnt;
2318 break;
2319 case SCO_LINK:
2320 case ESCO_LINK:
2321 cnt = hdev->sco_cnt;
2322 break;
2323 case LE_LINK:
2324 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2325 break;
2326 default:
2327 cnt = 0;
2328 BT_ERR("Unknown link type");
2329 }
2330
2331 q = cnt / num;
2332 *quote = q ? q : 1;
2333 } else
2334 *quote = 0;
2335
2336 BT_DBG("conn %p quote %d", conn, *quote);
2337 return conn;
2338}
2339
2340static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2341{
2342 struct hci_conn_hash *h = &hdev->conn_hash;
2343 struct hci_conn *c;
2344
2345 BT_ERR("%s link tx timeout", hdev->name);
2346
2347 rcu_read_lock();
2348
2349 /* Kill stalled connections */
2350 list_for_each_entry_rcu(c, &h->list, list) {
2351 if (c->type == type && c->sent) {
2352 BT_ERR("%s killing stalled connection %s",
2353 hdev->name, batostr(&c->dst));
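 /* 0x13: Remote User Terminated Connection */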
2354 hci_acl_disconn(c, 0x13);
2355 }
2356 }
2357
2358 rcu_read_unlock();
2359}
2360
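/* Per-channel scheduler: only channels whose head-of-queue priority
 * matches the highest pending priority are considered, and among those
 * the channel of the least recently served connection wins. The quote
 * is cnt / num frames, at least one. */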
2361static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2362 int *quote)
2363{
2364 struct hci_conn_hash *h = &hdev->conn_hash;
2365 struct hci_chan *chan = NULL;
2366 int num = 0, min = ~0, cur_prio = 0;
2367 struct hci_conn *conn;
2368 int cnt, q, conn_num = 0;
2369
2370 BT_DBG("%s", hdev->name);
2371
2372 rcu_read_lock();
2373
2374 list_for_each_entry_rcu(conn, &h->list, list) {
2375 struct hci_chan *tmp;
2376
2377 if (conn->type != type)
2378 continue;
2379
2380 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2381 continue;
2382
2383 conn_num++;
2384
2385 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2386 struct sk_buff *skb;
2387
2388 if (skb_queue_empty(&tmp->data_q))
2389 continue;
2390
2391 skb = skb_peek(&tmp->data_q);
2392 if (skb->priority < cur_prio)
2393 continue;
2394
2395 if (skb->priority > cur_prio) {
2396 num = 0;
2397 min = ~0;
2398 cur_prio = skb->priority;
2399 }
2400
2401 num++;
2402
2403 if (conn->sent < min) {
2404 min = conn->sent;
2405 chan = tmp;
2406 }
2407 }
2408
2409 if (hci_conn_num(hdev, type) == conn_num)
2410 break;
2411 }
2412
2413 rcu_read_unlock();
2414
2415 if (!chan)
2416 return NULL;
2417
2418 switch (chan->conn->type) {
2419 case ACL_LINK:
2420 cnt = hdev->acl_cnt;
2421 break;
2422 case SCO_LINK:
2423 case ESCO_LINK:
2424 cnt = hdev->sco_cnt;
2425 break;
2426 case LE_LINK:
2427 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2428 break;
2429 default:
2430 cnt = 0;
2431 BT_ERR("Unknown link type");
2432 }
2433
2434 q = cnt / num;
2435 *quote = q ? q : 1;
2436 BT_DBG("chan %p quote %d", chan, *quote);
2437 return chan;
2438}
2439
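/* Anti-starvation pass: channels that sent nothing in the last round
 * get their queued frames promoted to HCI_PRIO_MAX - 1, while channels
 * that did send simply have their per-round counter reset. */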
2440static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2441{
2442 struct hci_conn_hash *h = &hdev->conn_hash;
2443 struct hci_conn *conn;
2444 int num = 0;
2445
2446 BT_DBG("%s", hdev->name);
2447
2448 rcu_read_lock();
2449
2450 list_for_each_entry_rcu(conn, &h->list, list) {
2451 struct hci_chan *chan;
2452
2453 if (conn->type != type)
2454 continue;
2455
2456 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2457 continue;
2458
2459 num++;
2460
2461 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2462 struct sk_buff *skb;
2463
2464 if (chan->sent) {
2465 chan->sent = 0;
2466 continue;
2467 }
2468
2469 if (skb_queue_empty(&chan->data_q))
2470 continue;
2471
2472 skb = skb_peek(&chan->data_q);
2473 if (skb->priority >= HCI_PRIO_MAX - 1)
2474 continue;
2475
2476 skb->priority = HCI_PRIO_MAX - 1;
2477
2478 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2479 skb->priority);
2480 }
2481
2482 if (hci_conn_num(hdev, type) == num)
2483 break;
2484 }
2485
2486 rcu_read_unlock();
2487
2488}
2489
2490static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2491{
2492 /* Calculate the number of data blocks used by this packet */
2493 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2494}
2495
2496static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2497{
2498 if (!test_bit(HCI_RAW, &hdev->flags)) {
2499 /* ACL tx timeout must be longer than the maximum
2500 * link supervision timeout (40.9 seconds). */
2501 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2502 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2503 hci_link_tx_to(hdev, ACL_LINK);
2504 }
2505}
2506
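/* Packet-based ACL scheduling: each transmitted frame consumes one
 * acl_cnt credit; credits are replenished when the controller reports
 * completed packets. */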
2507static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2508{
2509 unsigned int cnt = hdev->acl_cnt;
2510 struct hci_chan *chan;
2511 struct sk_buff *skb;
2512 int quote;
2513
2514 __check_timeout(hdev, cnt);
2515
2516 while (hdev->acl_cnt &&
2517 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2518 u32 priority = (skb_peek(&chan->data_q))->priority;
2519 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2520 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2521 skb->len, skb->priority);
2522
2523 /* Stop if priority has changed */
2524 if (skb->priority < priority)
2525 break;
2526
2527 skb = skb_dequeue(&chan->data_q);
2528
2529 hci_conn_enter_active_mode(chan->conn,
2530 bt_cb(skb)->force_active);
2531
2532 hci_send_frame(skb);
2533 hdev->acl_last_tx = jiffies;
2534
2535 hdev->acl_cnt--;
2536 chan->sent++;
2537 chan->conn->sent++;
2538 }
2539 }
2540
2541 if (cnt != hdev->acl_cnt)
2542 hci_prio_recalculate(hdev, ACL_LINK);
2543}
2544
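/* Block-based ACL scheduling (data block flow control): each frame
 * consumes __get_blocks() credits from block_cnt instead of a single
 * packet credit. */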
2545static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2546{
2547 unsigned int cnt = hdev->block_cnt;
2548 struct hci_chan *chan;
2549 struct sk_buff *skb;
2550 int quote;
2551
2552 __check_timeout(hdev, cnt);
2553
2554 while (hdev->block_cnt > 0 &&
2555 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2556 u32 priority = (skb_peek(&chan->data_q))->priority;
2557 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2558 int blocks;
2559
2560 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2561 skb->len, skb->priority);
2562
2563 /* Stop if priority has changed */
2564 if (skb->priority < priority)
2565 break;
2566
2567 skb = skb_dequeue(&chan->data_q);
2568
2569 blocks = __get_blocks(hdev, skb);
2570 if (blocks > hdev->block_cnt)
2571 return;
2572
2573 hci_conn_enter_active_mode(chan->conn,
2574 bt_cb(skb)->force_active);
2575
2576 hci_send_frame(skb);
2577 hdev->acl_last_tx = jiffies;
2578
2579 hdev->block_cnt -= blocks;
2580 quote -= blocks;
2581
2582 chan->sent += blocks;
2583 chan->conn->sent += blocks;
2584 }
2585 }
2586
2587 if (cnt != hdev->block_cnt)
2588 hci_prio_recalculate(hdev, ACL_LINK);
2589}
2590
2591static inline void hci_sched_acl(struct hci_dev *hdev)
2592{
2593 BT_DBG("%s", hdev->name);
2594
2595 if (!hci_conn_num(hdev, ACL_LINK))
2596 return;
2597
2598 switch (hdev->flow_ctl_mode) {
2599 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2600 hci_sched_acl_pkt(hdev);
2601 break;
2602
2603 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2604 hci_sched_acl_blk(hdev);
2605 break;
2606 }
2607}
2608
2609/* Schedule SCO */
2610static inline void hci_sched_sco(struct hci_dev *hdev)
2611{
2612 struct hci_conn *conn;
2613 struct sk_buff *skb;
2614 int quote;
2615
2616 BT_DBG("%s", hdev->name);
2617
2618 if (!hci_conn_num(hdev, SCO_LINK))
2619 return;
2620
2621 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2622 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2623 BT_DBG("skb %p len %d", skb, skb->len);
2624 hci_send_frame(skb);
2625
2626 conn->sent++;
2627 if (conn->sent == ~0)
2628 conn->sent = 0;
2629 }
2630 }
2631}
2632
2633static inline void hci_sched_esco(struct hci_dev *hdev)
2634{
2635 struct hci_conn *conn;
2636 struct sk_buff *skb;
2637 int quote;
2638
2639 BT_DBG("%s", hdev->name);
2640
2641 if (!hci_conn_num(hdev, ESCO_LINK))
2642 return;
2643
2644 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2645 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2646 BT_DBG("skb %p len %d", skb, skb->len);
2647 hci_send_frame(skb);
2648
2649 conn->sent++;
2650 if (conn->sent == ~0)
2651 conn->sent = 0;
2652 }
2653 }
2654}
2655
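/* LE scheduler: if the controller advertises no dedicated LE buffers
 * (le_pkts == 0), LE traffic shares the ACL credit pool. */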
2656static inline void hci_sched_le(struct hci_dev *hdev)
2657{
2658 struct hci_chan *chan;
2659 struct sk_buff *skb;
2660 int quote, cnt, tmp;
2661
2662 BT_DBG("%s", hdev->name);
2663
2664 if (!hci_conn_num(hdev, LE_LINK))
2665 return;
2666
2667 if (!test_bit(HCI_RAW, &hdev->flags)) {
2668 /* LE tx timeout must be longer than the maximum
2669 * link supervision timeout (40.9 seconds). */
2670 if (!hdev->le_cnt && hdev->le_pkts &&
2671 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2672 hci_link_tx_to(hdev, LE_LINK);
2673 }
2674
2675 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2676 tmp = cnt;
2677 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2678 u32 priority = (skb_peek(&chan->data_q))->priority;
2679 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2680 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2681 skb->len, skb->priority);
2682
2683 /* Stop if priority has changed */
2684 if (skb->priority < priority)
2685 break;
2686
2687 skb = skb_dequeue(&chan->data_q);
2688
2689 hci_send_frame(skb);
2690 hdev->le_last_tx = jiffies;
2691
2692 cnt--;
2693 chan->sent++;
2694 chan->conn->sent++;
2695 }
2696 }
2697
2698 if (hdev->le_pkts)
2699 hdev->le_cnt = cnt;
2700 else
2701 hdev->acl_cnt = cnt;
2702
2703 if (cnt != tmp)
2704 hci_prio_recalculate(hdev, LE_LINK);
2705}
2706
2707static void hci_tx_work(struct work_struct *work)
2708{
2709 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2710 struct sk_buff *skb;
2711
2712 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2713 hdev->sco_cnt, hdev->le_cnt);
2714
2715 /* Schedule queues and send stuff to HCI driver */
2716
2717 hci_sched_acl(hdev);
2718
2719 hci_sched_sco(hdev);
2720
2721 hci_sched_esco(hdev);
2722
2723 hci_sched_le(hdev);
2724
2725 /* Send next queued raw (unknown type) packet */
2726 while ((skb = skb_dequeue(&hdev->raw_q)))
2727 hci_send_frame(skb);
2728}
2729
2730/* ----- HCI RX task (incoming data processing) ----- */
2731
2732/* ACL data packet */
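/* The 16-bit handle field mixes the connection handle with the packet
 * boundary/broadcast flags; split them with hci_handle()/hci_flags()
 * before handing the payload to L2CAP. */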
2733static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2734{
2735 struct hci_acl_hdr *hdr = (void *) skb->data;
2736 struct hci_conn *conn;
2737 __u16 handle, flags;
2738
2739 skb_pull(skb, HCI_ACL_HDR_SIZE);
2740
2741 handle = __le16_to_cpu(hdr->handle);
2742 flags = hci_flags(handle);
2743 handle = hci_handle(handle);
2744
2745 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2746
2747 hdev->stat.acl_rx++;
2748
2749 hci_dev_lock(hdev);
2750 conn = hci_conn_hash_lookup_handle(hdev, handle);
2751 hci_dev_unlock(hdev);
2752
2753 if (conn) {
2754 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2755
2756 /* Send to upper protocol */
2757 l2cap_recv_acldata(conn, skb, flags);
2758 return;
2759 } else {
2760 BT_ERR("%s ACL packet for unknown connection handle %d",
2761 hdev->name, handle);
2762 }
2763
2764 kfree_skb(skb);
2765}
2766
2767/* SCO data packet */
2768static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2769{
2770 struct hci_sco_hdr *hdr = (void *) skb->data;
2771 struct hci_conn *conn;
2772 __u16 handle;
2773
2774 skb_pull(skb, HCI_SCO_HDR_SIZE);
2775
2776 handle = __le16_to_cpu(hdr->handle);
2777
2778 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2779
2780 hdev->stat.sco_rx++;
2781
2782 hci_dev_lock(hdev);
2783 conn = hci_conn_hash_lookup_handle(hdev, handle);
2784 hci_dev_unlock(hdev);
2785
2786 if (conn) {
2787 /* Send to upper protocol */
2788 sco_recv_scodata(conn, skb);
2789 return;
2790 } else {
2791 BT_ERR("%s SCO packet for unknown connection handle %d",
2792 hdev->name, handle);
2793 }
2794
2795 kfree_skb(skb);
2796}
2797
2798static void hci_rx_work(struct work_struct *work)
2799{
2800 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2801 struct sk_buff *skb;
2802
2803 BT_DBG("%s", hdev->name);
2804
2805 while ((skb = skb_dequeue(&hdev->rx_q))) {
2806 /* Send copy to monitor */
2807 hci_send_to_monitor(hdev, skb);
2808
2809 if (atomic_read(&hdev->promisc)) {
2810 /* Send copy to the sockets */
2811 hci_send_to_sock(hdev, skb);
2812 }
2813
2814 if (test_bit(HCI_RAW, &hdev->flags)) {
2815 kfree_skb(skb);
2816 continue;
2817 }
2818
2819 if (test_bit(HCI_INIT, &hdev->flags)) {
2820 /* Don't process data packets in this state. */
2821 switch (bt_cb(skb)->pkt_type) {
2822 case HCI_ACLDATA_PKT:
2823 case HCI_SCODATA_PKT:
2824 kfree_skb(skb);
2825 continue;
2826 }
2827 }
2828
2829 /* Process frame */
2830 switch (bt_cb(skb)->pkt_type) {
2831 case HCI_EVENT_PKT:
2832 BT_DBG("%s Event packet", hdev->name);
2833 hci_event_packet(hdev, skb);
2834 break;
2835
2836 case HCI_ACLDATA_PKT:
2837 BT_DBG("%s ACL data packet", hdev->name);
2838 hci_acldata_packet(hdev, skb);
2839 break;
2840
2841 case HCI_SCODATA_PKT:
2842 BT_DBG("%s SCO data packet", hdev->name);
2843 hci_scodata_packet(hdev, skb);
2844 break;
2845
2846 default:
2847 kfree_skb(skb);
2848 break;
2849 }
2850 }
2851}
2852
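/* Send the next queued command if the controller still has a command
 * credit, keep a clone in sent_cmd for hci_sent_cmd_data(), and (re)arm
 * the command timer unless a reset is in progress. */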
2853static void hci_cmd_work(struct work_struct *work)
2854{
2855 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2856 struct sk_buff *skb;
2857
2858 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2859
2860 /* Send queued commands */
2861 if (atomic_read(&hdev->cmd_cnt)) {
2862 skb = skb_dequeue(&hdev->cmd_q);
2863 if (!skb)
2864 return;
2865
2866 kfree_skb(hdev->sent_cmd);
2867
2868 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2869 if (hdev->sent_cmd) {
2870 atomic_dec(&hdev->cmd_cnt);
2871 hci_send_frame(skb);
2872 if (test_bit(HCI_RESET, &hdev->flags))
2873 del_timer(&hdev->cmd_timer);
2874 else
2875 mod_timer(&hdev->cmd_timer,
2876 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2877 } else {
2878 skb_queue_head(&hdev->cmd_q, skb);
2879 queue_work(hdev->workqueue, &hdev->cmd_work);
2880 }
2881 }
2882}
2883
2884int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2885{
2886 /* General inquiry access code (GIAC) */
2887 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2888 struct hci_cp_inquiry cp;
2889
2890 BT_DBG("%s", hdev->name);
2891
2892 if (test_bit(HCI_INQUIRY, &hdev->flags))
2893 return -EINPROGRESS;
2894
2895 inquiry_cache_flush(hdev);
2896
2897 memset(&cp, 0, sizeof(cp));
2898 memcpy(&cp.lap, lap, sizeof(cp.lap));
2899 cp.length = length;
2900
2901 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2902}
2903
2904int hci_cancel_inquiry(struct hci_dev *hdev)
2905{
2906 BT_DBG("%s", hdev->name);
2907
2908 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2909 return -EPERM;
2910
2911 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2912}