Bluetooth: Fix debug printing unallocated name
net/bluetooth/hci_core.c (GitHub: mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
50
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53
54 #define AUTO_OFF_TIMEOUT 2000
55
56 static void hci_rx_work(struct work_struct *work);
57 static void hci_cmd_work(struct work_struct *work);
58 static void hci_tx_work(struct work_struct *work);
59
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
63
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
67
68 /* ---- HCI notifications ---- */
69
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 hci_sock_dev_event(hdev, event);
73 }
74
75 /* ---- HCI requests ---- */
76
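/* Complete a pending HCI request. During the init phase a completion
 * that does not match the last init command either re-queues the last
 * sent command (to cope with spurious reset completes from some CSR
 * controllers) or is ignored; otherwise the synchronous request
 * waiter is woken up with the result. */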
77 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
78 {
79 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
80
81 /* If this is the init phase check if the completed command matches
82 * the last init command, and if not just return.
83 */
84 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86 u16 opcode = __le16_to_cpu(sent->opcode);
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
96 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
105 return;
106 }
107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113 }
114
115 static void hci_req_cancel(struct hci_dev *hdev, int err)
116 {
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124 }
125
126 /* Execute request and wait for completion. */
127 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
128 unsigned long opt, __u32 timeout)
129 {
130 DECLARE_WAITQUEUE(wait, current);
131 int err = 0;
132
133 BT_DBG("%s start", hdev->name);
134
135 hdev->req_status = HCI_REQ_PEND;
136
137 add_wait_queue(&hdev->req_wait_q, &wait);
138 set_current_state(TASK_INTERRUPTIBLE);
139
140 req(hdev, opt);
141 schedule_timeout(timeout);
142
143 remove_wait_queue(&hdev->req_wait_q, &wait);
144
145 if (signal_pending(current))
146 return -EINTR;
147
148 switch (hdev->req_status) {
149 case HCI_REQ_DONE:
150 err = -bt_to_errno(hdev->req_result);
151 break;
152
153 case HCI_REQ_CANCELED:
154 err = -hdev->req_result;
155 break;
156
157 default:
158 err = -ETIMEDOUT;
159 break;
160 }
161
162 hdev->req_status = hdev->req_result = 0;
163
164 BT_DBG("%s end: err %d", hdev->name, err);
165
166 return err;
167 }
168
169 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
170 unsigned long opt, __u32 timeout)
171 {
172 int ret;
173
174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183 }
184
185 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186 {
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
190 set_bit(HCI_RESET, &hdev->flags);
191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
192 }
193
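/* Issue the mandatory and optional HCI commands used to bring up a
 * BR/EDR controller: reset, feature/version/buffer/address/class/name
 * reads, event filter clearing, connection accept timeout and stored
 * link key cleanup. */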
194 static void bredr_init(struct hci_dev *hdev)
195 {
196 struct hci_cp_delete_stored_link_key cp;
197 __le16 param;
198 __u8 flt_type;
199
200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
202 /* Mandatory initialization */
203
204 /* Reset */
205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
208 }
209
210 /* Read Local Supported Features */
211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212
213 /* Read Local Version */
214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215
216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
218
219 /* Read BD Address */
220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
227
228 /* Read Voice Setting */
229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
230
231 /* Optional initialization */
232
233 /* Clear Event Filters */
234 flt_type = HCI_FLT_CLEAR_ALL;
235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
236
237 /* Connection accept timeout ~20 secs */
238 param = cpu_to_le16(0x7d00);
239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
240
241 bacpy(&cp.bdaddr, BDADDR_ANY);
242 cp.delete_all = 1;
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
244 }
245
246 static void amp_init(struct hci_dev *hdev)
247 {
248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255
256 /* Read Local AMP Info */
257 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
258 }
259
260 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
261 {
262 struct sk_buff *skb;
263
264 BT_DBG("%s %ld", hdev->name, opt);
265
266 /* Driver initialization */
267
268 /* Special commands */
269 while ((skb = skb_dequeue(&hdev->driver_init))) {
270 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
271 skb->dev = (void *) hdev;
272
273 skb_queue_tail(&hdev->cmd_q, skb);
274 queue_work(hdev->workqueue, &hdev->cmd_work);
275 }
276 skb_queue_purge(&hdev->driver_init);
277
278 switch (hdev->dev_type) {
279 case HCI_BREDR:
280 bredr_init(hdev);
281 break;
282
283 case HCI_AMP:
284 amp_init(hdev);
285 break;
286
287 default:
288 BT_ERR("Unknown device type %d", hdev->dev_type);
289 break;
290 }
291
292 }
293
294 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
295 {
296 BT_DBG("%s", hdev->name);
297
298 /* Read LE buffer size */
299 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
300 }
301
302 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
303 {
304 __u8 scan = opt;
305
306 BT_DBG("%s %x", hdev->name, scan);
307
308 /* Inquiry and Page scans */
309 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
310 }
311
312 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
313 {
314 __u8 auth = opt;
315
316 BT_DBG("%s %x", hdev->name, auth);
317
318 /* Authentication */
319 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
320 }
321
322 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
323 {
324 __u8 encrypt = opt;
325
326 BT_DBG("%s %x", hdev->name, encrypt);
327
328 /* Encryption */
329 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
330 }
331
332 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
333 {
334 __le16 policy = cpu_to_le16(opt);
335
336 BT_DBG("%s %x", hdev->name, policy);
337
338 /* Default link policy */
339 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
340 }
341
342 /* Get HCI device by index.
343 * Device is held on return. */
344 struct hci_dev *hci_dev_get(int index)
345 {
346 struct hci_dev *hdev = NULL, *d;
347
348 BT_DBG("%d", index);
349
350 if (index < 0)
351 return NULL;
352
353 read_lock(&hci_dev_list_lock);
354 list_for_each_entry(d, &hci_dev_list, list) {
355 if (d->id == index) {
356 hdev = hci_dev_hold(d);
357 break;
358 }
359 }
360 read_unlock(&hci_dev_list_lock);
361 return hdev;
362 }
363
364 /* ---- Inquiry support ---- */
365
366 bool hci_discovery_active(struct hci_dev *hdev)
367 {
368 struct discovery_state *discov = &hdev->discovery;
369
370 switch (discov->state) {
371 case DISCOVERY_FINDING:
372 case DISCOVERY_RESOLVING:
373 return true;
374
375 default:
376 return false;
377 }
378 }
379
380 void hci_discovery_set_state(struct hci_dev *hdev, int state)
381 {
382 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
383
384 if (hdev->discovery.state == state)
385 return;
386
387 switch (state) {
388 case DISCOVERY_STOPPED:
389 if (hdev->discovery.state != DISCOVERY_STARTING)
390 mgmt_discovering(hdev, 0);
391 break;
392 case DISCOVERY_STARTING:
393 break;
394 case DISCOVERY_FINDING:
395 mgmt_discovering(hdev, 1);
396 break;
397 case DISCOVERY_RESOLVING:
398 break;
399 case DISCOVERY_STOPPING:
400 break;
401 }
402
403 hdev->discovery.state = state;
404 }
405
406 static void inquiry_cache_flush(struct hci_dev *hdev)
407 {
408 struct discovery_state *cache = &hdev->discovery;
409 struct inquiry_entry *p, *n;
410
411 list_for_each_entry_safe(p, n, &cache->all, all) {
412 list_del(&p->all);
413 kfree(p);
414 }
415
416 INIT_LIST_HEAD(&cache->unknown);
417 INIT_LIST_HEAD(&cache->resolve);
418 }
419
420 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
421 {
422 struct discovery_state *cache = &hdev->discovery;
423 struct inquiry_entry *e;
424
425 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
426
427 list_for_each_entry(e, &cache->all, all) {
428 if (!bacmp(&e->data.bdaddr, bdaddr))
429 return e;
430 }
431
432 return NULL;
433 }
434
435 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
436 bdaddr_t *bdaddr)
437 {
438 struct discovery_state *cache = &hdev->discovery;
439 struct inquiry_entry *e;
440
441 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
442
443 list_for_each_entry(e, &cache->unknown, list) {
444 if (!bacmp(&e->data.bdaddr, bdaddr))
445 return e;
446 }
447
448 return NULL;
449 }
450
451 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
452 bdaddr_t *bdaddr,
453 int state)
454 {
455 struct discovery_state *cache = &hdev->discovery;
456 struct inquiry_entry *e;
457
458 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
459
460 list_for_each_entry(e, &cache->resolve, list) {
461 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
462 return e;
463 if (!bacmp(&e->data.bdaddr, bdaddr))
464 return e;
465 }
466
467 return NULL;
468 }
469
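/* Re-insert an entry into the resolve list, keeping it sorted so that
 * devices with the strongest signal (smallest absolute RSSI) get their
 * names resolved first; entries already pending resolution stay ahead. */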
470 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
471 struct inquiry_entry *ie)
472 {
473 struct discovery_state *cache = &hdev->discovery;
474 struct list_head *pos = &cache->resolve;
475 struct inquiry_entry *p;
476
477 list_del(&ie->list);
478
479 list_for_each_entry(p, &cache->resolve, list) {
480 if (p->name_state != NAME_PENDING &&
481 abs(p->data.rssi) >= abs(ie->data.rssi))
482 break;
483 pos = &p->list;
484 }
485
486 list_add(&ie->list, pos);
487 }
488
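/* Add a new inquiry result to the cache or refresh an existing entry.
 * Returns false when the remote name is still not known, true otherwise. */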
489 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
490 bool name_known, bool *ssp)
491 {
492 struct discovery_state *cache = &hdev->discovery;
493 struct inquiry_entry *ie;
494
495 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
496
497 if (ssp)
498 *ssp = data->ssp_mode;
499
500 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
501 if (ie) {
502 if (ie->data.ssp_mode && ssp)
503 *ssp = true;
504
505 if (ie->name_state == NAME_NEEDED &&
506 data->rssi != ie->data.rssi) {
507 ie->data.rssi = data->rssi;
508 hci_inquiry_cache_update_resolve(hdev, ie);
509 }
510
511 goto update;
512 }
513
514 /* Entry not in the cache. Add new one. */
515 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
516 if (!ie)
517 return false;
518
519 list_add(&ie->all, &cache->all);
520
521 if (name_known) {
522 ie->name_state = NAME_KNOWN;
523 } else {
524 ie->name_state = NAME_NOT_KNOWN;
525 list_add(&ie->list, &cache->unknown);
526 }
527
528 update:
529 if (name_known && ie->name_state != NAME_KNOWN &&
530 ie->name_state != NAME_PENDING) {
531 ie->name_state = NAME_KNOWN;
532 list_del(&ie->list);
533 }
534
535 memcpy(&ie->data, data, sizeof(*data));
536 ie->timestamp = jiffies;
537 cache->timestamp = jiffies;
538
539 if (ie->name_state == NAME_NOT_KNOWN)
540 return false;
541
542 return true;
543 }
544
545 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
546 {
547 struct discovery_state *cache = &hdev->discovery;
548 struct inquiry_info *info = (struct inquiry_info *) buf;
549 struct inquiry_entry *e;
550 int copied = 0;
551
552 list_for_each_entry(e, &cache->all, all) {
553 struct inquiry_data *data = &e->data;
554
555 if (copied >= num)
556 break;
557
558 bacpy(&info->bdaddr, &data->bdaddr);
559 info->pscan_rep_mode = data->pscan_rep_mode;
560 info->pscan_period_mode = data->pscan_period_mode;
561 info->pscan_mode = data->pscan_mode;
562 memcpy(info->dev_class, data->dev_class, 3);
563 info->clock_offset = data->clock_offset;
564
565 info++;
566 copied++;
567 }
568
569 BT_DBG("cache %p, copied %d", cache, copied);
570 return copied;
571 }
572
573 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
574 {
575 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
576 struct hci_cp_inquiry cp;
577
578 BT_DBG("%s", hdev->name);
579
580 if (test_bit(HCI_INQUIRY, &hdev->flags))
581 return;
582
583 /* Start Inquiry */
584 memcpy(&cp.lap, &ir->lap, 3);
585 cp.length = ir->length;
586 cp.num_rsp = ir->num_rsp;
587 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
588 }
589
590 int hci_inquiry(void __user *arg)
591 {
592 __u8 __user *ptr = arg;
593 struct hci_inquiry_req ir;
594 struct hci_dev *hdev;
595 int err = 0, do_inquiry = 0, max_rsp;
596 long timeo;
597 __u8 *buf;
598
599 if (copy_from_user(&ir, ptr, sizeof(ir)))
600 return -EFAULT;
601
602 hdev = hci_dev_get(ir.dev_id);
603 if (!hdev)
604 return -ENODEV;
605
606 hci_dev_lock(hdev);
607 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
608 inquiry_cache_empty(hdev) ||
609 ir.flags & IREQ_CACHE_FLUSH) {
610 inquiry_cache_flush(hdev);
611 do_inquiry = 1;
612 }
613 hci_dev_unlock(hdev);
614
615 timeo = ir.length * msecs_to_jiffies(2000);
616
617 if (do_inquiry) {
618 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
619 if (err < 0)
620 goto done;
621 }
622
623 /* for unlimited number of responses we will use buffer with 255 entries */
624 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
625
626 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
627 * copy it to the user space.
628 */
629 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
630 if (!buf) {
631 err = -ENOMEM;
632 goto done;
633 }
634
635 hci_dev_lock(hdev);
636 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
637 hci_dev_unlock(hdev);
638
639 BT_DBG("num_rsp %d", ir.num_rsp);
640
641 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
642 ptr += sizeof(ir);
643 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
644 ir.num_rsp))
645 err = -EFAULT;
646 } else
647 err = -EFAULT;
648
649 kfree(buf);
650
651 done:
652 hci_dev_put(hdev);
653 return err;
654 }
655
656 /* ---- HCI ioctl helpers ---- */
657
658 int hci_dev_open(__u16 dev)
659 {
660 struct hci_dev *hdev;
661 int ret = 0;
662
663 hdev = hci_dev_get(dev);
664 if (!hdev)
665 return -ENODEV;
666
667 BT_DBG("%s %p", hdev->name, hdev);
668
669 hci_req_lock(hdev);
670
671 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
672 ret = -ENODEV;
673 goto done;
674 }
675
676 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
677 ret = -ERFKILL;
678 goto done;
679 }
680
681 if (test_bit(HCI_UP, &hdev->flags)) {
682 ret = -EALREADY;
683 goto done;
684 }
685
686 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
687 set_bit(HCI_RAW, &hdev->flags);
688
689 /* Treat all non BR/EDR controllers as raw devices if
690 enable_hs is not set */
691 if (hdev->dev_type != HCI_BREDR && !enable_hs)
692 set_bit(HCI_RAW, &hdev->flags);
693
694 if (hdev->open(hdev)) {
695 ret = -EIO;
696 goto done;
697 }
698
699 if (!test_bit(HCI_RAW, &hdev->flags)) {
700 atomic_set(&hdev->cmd_cnt, 1);
701 set_bit(HCI_INIT, &hdev->flags);
702 hdev->init_last_cmd = 0;
703
704 ret = __hci_request(hdev, hci_init_req, 0,
705 msecs_to_jiffies(HCI_INIT_TIMEOUT));
706
707 if (lmp_host_le_capable(hdev))
708 ret = __hci_request(hdev, hci_le_init_req, 0,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT));
710
711 clear_bit(HCI_INIT, &hdev->flags);
712 }
713
714 if (!ret) {
715 hci_dev_hold(hdev);
716 set_bit(HCI_UP, &hdev->flags);
717 hci_notify(hdev, HCI_DEV_UP);
718 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
719 hci_dev_lock(hdev);
720 mgmt_powered(hdev, 1);
721 hci_dev_unlock(hdev);
722 }
723 } else {
724 /* Init failed, cleanup */
725 flush_work(&hdev->tx_work);
726 flush_work(&hdev->cmd_work);
727 flush_work(&hdev->rx_work);
728
729 skb_queue_purge(&hdev->cmd_q);
730 skb_queue_purge(&hdev->rx_q);
731
732 if (hdev->flush)
733 hdev->flush(hdev);
734
735 if (hdev->sent_cmd) {
736 kfree_skb(hdev->sent_cmd);
737 hdev->sent_cmd = NULL;
738 }
739
740 hdev->close(hdev);
741 hdev->flags = 0;
742 }
743
744 done:
745 hci_req_unlock(hdev);
746 hci_dev_put(hdev);
747 return ret;
748 }
749
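/* Shut the device down: cancel pending requests and delayed work, flush
 * the work items and packet queues, drop all connections and hand the
 * device back to the driver via its close callback. */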
750 static int hci_dev_do_close(struct hci_dev *hdev)
751 {
752 BT_DBG("%s %p", hdev->name, hdev);
753
754 cancel_work_sync(&hdev->le_scan);
755
756 hci_req_cancel(hdev, ENODEV);
757 hci_req_lock(hdev);
758
759 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
760 del_timer_sync(&hdev->cmd_timer);
761 hci_req_unlock(hdev);
762 return 0;
763 }
764
765 /* Flush RX and TX works */
766 flush_work(&hdev->tx_work);
767 flush_work(&hdev->rx_work);
768
769 if (hdev->discov_timeout > 0) {
770 cancel_delayed_work(&hdev->discov_off);
771 hdev->discov_timeout = 0;
772 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
773 }
774
775 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
776 cancel_delayed_work(&hdev->service_cache);
777
778 cancel_delayed_work_sync(&hdev->le_scan_disable);
779
780 hci_dev_lock(hdev);
781 inquiry_cache_flush(hdev);
782 hci_conn_hash_flush(hdev);
783 hci_dev_unlock(hdev);
784
785 hci_notify(hdev, HCI_DEV_DOWN);
786
787 if (hdev->flush)
788 hdev->flush(hdev);
789
790 /* Reset device */
791 skb_queue_purge(&hdev->cmd_q);
792 atomic_set(&hdev->cmd_cnt, 1);
793 if (!test_bit(HCI_RAW, &hdev->flags) &&
794 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
795 set_bit(HCI_INIT, &hdev->flags);
796 __hci_request(hdev, hci_reset_req, 0,
797 msecs_to_jiffies(250));
798 clear_bit(HCI_INIT, &hdev->flags);
799 }
800
801 /* flush cmd work */
802 flush_work(&hdev->cmd_work);
803
804 /* Drop queues */
805 skb_queue_purge(&hdev->rx_q);
806 skb_queue_purge(&hdev->cmd_q);
807 skb_queue_purge(&hdev->raw_q);
808
809 /* Drop last sent command */
810 if (hdev->sent_cmd) {
811 del_timer_sync(&hdev->cmd_timer);
812 kfree_skb(hdev->sent_cmd);
813 hdev->sent_cmd = NULL;
814 }
815
816 /* After this point our queues are empty
817 * and no tasks are scheduled. */
818 hdev->close(hdev);
819
820 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
821 hci_dev_lock(hdev);
822 mgmt_powered(hdev, 0);
823 hci_dev_unlock(hdev);
824 }
825
826 /* Clear flags */
827 hdev->flags = 0;
828
829 memset(hdev->eir, 0, sizeof(hdev->eir));
830 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
831
832 hci_req_unlock(hdev);
833
834 hci_dev_put(hdev);
835 return 0;
836 }
837
838 int hci_dev_close(__u16 dev)
839 {
840 struct hci_dev *hdev;
841 int err;
842
843 hdev = hci_dev_get(dev);
844 if (!hdev)
845 return -ENODEV;
846
847 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
848 cancel_delayed_work(&hdev->power_off);
849
850 err = hci_dev_do_close(hdev);
851
852 hci_dev_put(hdev);
853 return err;
854 }
855
856 int hci_dev_reset(__u16 dev)
857 {
858 struct hci_dev *hdev;
859 int ret = 0;
860
861 hdev = hci_dev_get(dev);
862 if (!hdev)
863 return -ENODEV;
864
865 hci_req_lock(hdev);
866
867 if (!test_bit(HCI_UP, &hdev->flags))
868 goto done;
869
870 /* Drop queues */
871 skb_queue_purge(&hdev->rx_q);
872 skb_queue_purge(&hdev->cmd_q);
873
874 hci_dev_lock(hdev);
875 inquiry_cache_flush(hdev);
876 hci_conn_hash_flush(hdev);
877 hci_dev_unlock(hdev);
878
879 if (hdev->flush)
880 hdev->flush(hdev);
881
882 atomic_set(&hdev->cmd_cnt, 1);
883 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
884
885 if (!test_bit(HCI_RAW, &hdev->flags))
886 ret = __hci_request(hdev, hci_reset_req, 0,
887 msecs_to_jiffies(HCI_INIT_TIMEOUT));
888
889 done:
890 hci_req_unlock(hdev);
891 hci_dev_put(hdev);
892 return ret;
893 }
894
895 int hci_dev_reset_stat(__u16 dev)
896 {
897 struct hci_dev *hdev;
898 int ret = 0;
899
900 hdev = hci_dev_get(dev);
901 if (!hdev)
902 return -ENODEV;
903
904 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
905
906 hci_dev_put(hdev);
907
908 return ret;
909 }
910
911 int hci_dev_cmd(unsigned int cmd, void __user *arg)
912 {
913 struct hci_dev *hdev;
914 struct hci_dev_req dr;
915 int err = 0;
916
917 if (copy_from_user(&dr, arg, sizeof(dr)))
918 return -EFAULT;
919
920 hdev = hci_dev_get(dr.dev_id);
921 if (!hdev)
922 return -ENODEV;
923
924 switch (cmd) {
925 case HCISETAUTH:
926 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
927 msecs_to_jiffies(HCI_INIT_TIMEOUT));
928 break;
929
930 case HCISETENCRYPT:
931 if (!lmp_encrypt_capable(hdev)) {
932 err = -EOPNOTSUPP;
933 break;
934 }
935
936 if (!test_bit(HCI_AUTH, &hdev->flags)) {
937 /* Auth must be enabled first */
938 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
939 msecs_to_jiffies(HCI_INIT_TIMEOUT));
940 if (err)
941 break;
942 }
943
944 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
945 msecs_to_jiffies(HCI_INIT_TIMEOUT));
946 break;
947
948 case HCISETSCAN:
949 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
950 msecs_to_jiffies(HCI_INIT_TIMEOUT));
951 break;
952
953 case HCISETLINKPOL:
954 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
955 msecs_to_jiffies(HCI_INIT_TIMEOUT));
956 break;
957
958 case HCISETLINKMODE:
959 hdev->link_mode = ((__u16) dr.dev_opt) &
960 (HCI_LM_MASTER | HCI_LM_ACCEPT);
961 break;
962
963 case HCISETPTYPE:
964 hdev->pkt_type = (__u16) dr.dev_opt;
965 break;
966
967 case HCISETACLMTU:
968 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
969 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
970 break;
971
972 case HCISETSCOMTU:
973 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
974 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
975 break;
976
977 default:
978 err = -EINVAL;
979 break;
980 }
981
982 hci_dev_put(hdev);
983 return err;
984 }
985
986 int hci_get_dev_list(void __user *arg)
987 {
988 struct hci_dev *hdev;
989 struct hci_dev_list_req *dl;
990 struct hci_dev_req *dr;
991 int n = 0, size, err;
992 __u16 dev_num;
993
994 if (get_user(dev_num, (__u16 __user *) arg))
995 return -EFAULT;
996
997 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
998 return -EINVAL;
999
1000 size = sizeof(*dl) + dev_num * sizeof(*dr);
1001
1002 dl = kzalloc(size, GFP_KERNEL);
1003 if (!dl)
1004 return -ENOMEM;
1005
1006 dr = dl->dev_req;
1007
1008 read_lock(&hci_dev_list_lock);
1009 list_for_each_entry(hdev, &hci_dev_list, list) {
1010 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1011 cancel_delayed_work(&hdev->power_off);
1012
1013 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1014 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1015
1016 (dr + n)->dev_id = hdev->id;
1017 (dr + n)->dev_opt = hdev->flags;
1018
1019 if (++n >= dev_num)
1020 break;
1021 }
1022 read_unlock(&hci_dev_list_lock);
1023
1024 dl->dev_num = n;
1025 size = sizeof(*dl) + n * sizeof(*dr);
1026
1027 err = copy_to_user(arg, dl, size);
1028 kfree(dl);
1029
1030 return err ? -EFAULT : 0;
1031 }
1032
1033 int hci_get_dev_info(void __user *arg)
1034 {
1035 struct hci_dev *hdev;
1036 struct hci_dev_info di;
1037 int err = 0;
1038
1039 if (copy_from_user(&di, arg, sizeof(di)))
1040 return -EFAULT;
1041
1042 hdev = hci_dev_get(di.dev_id);
1043 if (!hdev)
1044 return -ENODEV;
1045
1046 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1047 cancel_delayed_work_sync(&hdev->power_off);
1048
1049 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1050 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1051
1052 strcpy(di.name, hdev->name);
1053 di.bdaddr = hdev->bdaddr;
1054 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1055 di.flags = hdev->flags;
1056 di.pkt_type = hdev->pkt_type;
1057 di.acl_mtu = hdev->acl_mtu;
1058 di.acl_pkts = hdev->acl_pkts;
1059 di.sco_mtu = hdev->sco_mtu;
1060 di.sco_pkts = hdev->sco_pkts;
1061 di.link_policy = hdev->link_policy;
1062 di.link_mode = hdev->link_mode;
1063
1064 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1065 memcpy(&di.features, &hdev->features, sizeof(di.features));
1066
1067 if (copy_to_user(arg, &di, sizeof(di)))
1068 err = -EFAULT;
1069
1070 hci_dev_put(hdev);
1071
1072 return err;
1073 }
1074
1075 /* ---- Interface to HCI drivers ---- */
1076
1077 static int hci_rfkill_set_block(void *data, bool blocked)
1078 {
1079 struct hci_dev *hdev = data;
1080
1081 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1082
1083 if (!blocked)
1084 return 0;
1085
1086 hci_dev_do_close(hdev);
1087
1088 return 0;
1089 }
1090
1091 static const struct rfkill_ops hci_rfkill_ops = {
1092 .set_block = hci_rfkill_set_block,
1093 };
1094
1095 /* Alloc HCI device */
1096 struct hci_dev *hci_alloc_dev(void)
1097 {
1098 struct hci_dev *hdev;
1099
1100 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1101 if (!hdev)
1102 return NULL;
1103
1104 hci_init_sysfs(hdev);
1105 skb_queue_head_init(&hdev->driver_init);
1106
1107 return hdev;
1108 }
1109 EXPORT_SYMBOL(hci_alloc_dev);
1110
1111 /* Free HCI device */
1112 void hci_free_dev(struct hci_dev *hdev)
1113 {
1114 skb_queue_purge(&hdev->driver_init);
1115
1116 /* will free via device release */
1117 put_device(&hdev->dev);
1118 }
1119 EXPORT_SYMBOL(hci_free_dev);
1120
1121 static void hci_power_on(struct work_struct *work)
1122 {
1123 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1124
1125 BT_DBG("%s", hdev->name);
1126
1127 if (hci_dev_open(hdev->id) < 0)
1128 return;
1129
1130 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1131 schedule_delayed_work(&hdev->power_off,
1132 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1133
1134 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1135 mgmt_index_added(hdev);
1136 }
1137
1138 static void hci_power_off(struct work_struct *work)
1139 {
1140 struct hci_dev *hdev = container_of(work, struct hci_dev,
1141 power_off.work);
1142
1143 BT_DBG("%s", hdev->name);
1144
1145 hci_dev_do_close(hdev);
1146 }
1147
1148 static void hci_discov_off(struct work_struct *work)
1149 {
1150 struct hci_dev *hdev;
1151 u8 scan = SCAN_PAGE;
1152
1153 hdev = container_of(work, struct hci_dev, discov_off.work);
1154
1155 BT_DBG("%s", hdev->name);
1156
1157 hci_dev_lock(hdev);
1158
1159 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1160
1161 hdev->discov_timeout = 0;
1162
1163 hci_dev_unlock(hdev);
1164 }
1165
1166 int hci_uuids_clear(struct hci_dev *hdev)
1167 {
1168 struct list_head *p, *n;
1169
1170 list_for_each_safe(p, n, &hdev->uuids) {
1171 struct bt_uuid *uuid;
1172
1173 uuid = list_entry(p, struct bt_uuid, list);
1174
1175 list_del(p);
1176 kfree(uuid);
1177 }
1178
1179 return 0;
1180 }
1181
1182 int hci_link_keys_clear(struct hci_dev *hdev)
1183 {
1184 struct list_head *p, *n;
1185
1186 list_for_each_safe(p, n, &hdev->link_keys) {
1187 struct link_key *key;
1188
1189 key = list_entry(p, struct link_key, list);
1190
1191 list_del(p);
1192 kfree(key);
1193 }
1194
1195 return 0;
1196 }
1197
1198 int hci_smp_ltks_clear(struct hci_dev *hdev)
1199 {
1200 struct smp_ltk *k, *tmp;
1201
1202 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1203 list_del(&k->list);
1204 kfree(k);
1205 }
1206
1207 return 0;
1208 }
1209
1210 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1211 {
1212 struct link_key *k;
1213
1214 list_for_each_entry(k, &hdev->link_keys, list)
1215 if (bacmp(bdaddr, &k->bdaddr) == 0)
1216 return k;
1217
1218 return NULL;
1219 }
1220
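/* Decide whether a link key should be stored persistently, based on the
 * key type and the bonding requirements negotiated by both sides. */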
1221 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1222 u8 key_type, u8 old_key_type)
1223 {
1224 /* Legacy key */
1225 if (key_type < 0x03)
1226 return true;
1227
1228 /* Debug keys are insecure so don't store them persistently */
1229 if (key_type == HCI_LK_DEBUG_COMBINATION)
1230 return false;
1231
1232 /* Changed combination key and there's no previous one */
1233 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1234 return false;
1235
1236 /* Security mode 3 case */
1237 if (!conn)
1238 return true;
1239
1240 /* Neither local nor remote side had no-bonding as requirement */
1241 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1242 return true;
1243
1244 /* Local side had dedicated bonding as requirement */
1245 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1246 return true;
1247
1248 /* Remote side had dedicated bonding as requirement */
1249 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1250 return true;
1251
1252 /* If none of the above criteria match, then don't store the key
1253 * persistently */
1254 return false;
1255 }
1256
1257 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1258 {
1259 struct smp_ltk *k;
1260
1261 list_for_each_entry(k, &hdev->long_term_keys, list) {
1262 if (k->ediv != ediv ||
1263 memcmp(rand, k->rand, sizeof(k->rand)))
1264 continue;
1265
1266 return k;
1267 }
1268
1269 return NULL;
1270 }
1271 EXPORT_SYMBOL(hci_find_ltk);
1272
1273 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1274 u8 addr_type)
1275 {
1276 struct smp_ltk *k;
1277
1278 list_for_each_entry(k, &hdev->long_term_keys, list)
1279 if (addr_type == k->bdaddr_type &&
1280 bacmp(bdaddr, &k->bdaddr) == 0)
1281 return k;
1282
1283 return NULL;
1284 }
1285 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1286
1287 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1288 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1289 {
1290 struct link_key *key, *old_key;
1291 u8 old_key_type;
1292 bool persistent;
1293
1294 old_key = hci_find_link_key(hdev, bdaddr);
1295 if (old_key) {
1296 old_key_type = old_key->type;
1297 key = old_key;
1298 } else {
1299 old_key_type = conn ? conn->key_type : 0xff;
1300 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1301 if (!key)
1302 return -ENOMEM;
1303 list_add(&key->list, &hdev->link_keys);
1304 }
1305
1306 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1307
1308 /* Some buggy controller combinations generate a changed
1309 * combination key for legacy pairing even when there's no
1310 * previous key */
1311 if (type == HCI_LK_CHANGED_COMBINATION &&
1312 (!conn || conn->remote_auth == 0xff) &&
1313 old_key_type == 0xff) {
1314 type = HCI_LK_COMBINATION;
1315 if (conn)
1316 conn->key_type = type;
1317 }
1318
1319 bacpy(&key->bdaddr, bdaddr);
1320 memcpy(key->val, val, 16);
1321 key->pin_len = pin_len;
1322
1323 if (type == HCI_LK_CHANGED_COMBINATION)
1324 key->type = old_key_type;
1325 else
1326 key->type = type;
1327
1328 if (!new_key)
1329 return 0;
1330
1331 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1332
1333 mgmt_new_link_key(hdev, key, persistent);
1334
1335 if (conn)
1336 conn->flush_key = !persistent;
1337
1338 return 0;
1339 }
1340
1341 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1342 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1343 ediv, u8 rand[8])
1344 {
1345 struct smp_ltk *key, *old_key;
1346
1347 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1348 return 0;
1349
1350 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1351 if (old_key)
1352 key = old_key;
1353 else {
1354 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1355 if (!key)
1356 return -ENOMEM;
1357 list_add(&key->list, &hdev->long_term_keys);
1358 }
1359
1360 bacpy(&key->bdaddr, bdaddr);
1361 key->bdaddr_type = addr_type;
1362 memcpy(key->val, tk, sizeof(key->val));
1363 key->authenticated = authenticated;
1364 key->ediv = ediv;
1365 key->enc_size = enc_size;
1366 key->type = type;
1367 memcpy(key->rand, rand, sizeof(key->rand));
1368
1369 if (!new_key)
1370 return 0;
1371
1372 if (type & HCI_SMP_LTK)
1373 mgmt_new_ltk(hdev, key, 1);
1374
1375 return 0;
1376 }
1377
1378 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1379 {
1380 struct link_key *key;
1381
1382 key = hci_find_link_key(hdev, bdaddr);
1383 if (!key)
1384 return -ENOENT;
1385
1386 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1387
1388 list_del(&key->list);
1389 kfree(key);
1390
1391 return 0;
1392 }
1393
1394 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1395 {
1396 struct smp_ltk *k, *tmp;
1397
1398 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1399 if (bacmp(bdaddr, &k->bdaddr))
1400 continue;
1401
1402 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1403
1404 list_del(&k->list);
1405 kfree(k);
1406 }
1407
1408 return 0;
1409 }
1410
1411 /* HCI command timer function */
1412 static void hci_cmd_timer(unsigned long arg)
1413 {
1414 struct hci_dev *hdev = (void *) arg;
1415
1416 BT_ERR("%s command tx timeout", hdev->name);
1417 atomic_set(&hdev->cmd_cnt, 1);
1418 queue_work(hdev->workqueue, &hdev->cmd_work);
1419 }
1420
1421 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1422 bdaddr_t *bdaddr)
1423 {
1424 struct oob_data *data;
1425
1426 list_for_each_entry(data, &hdev->remote_oob_data, list)
1427 if (bacmp(bdaddr, &data->bdaddr) == 0)
1428 return data;
1429
1430 return NULL;
1431 }
1432
1433 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1434 {
1435 struct oob_data *data;
1436
1437 data = hci_find_remote_oob_data(hdev, bdaddr);
1438 if (!data)
1439 return -ENOENT;
1440
1441 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1442
1443 list_del(&data->list);
1444 kfree(data);
1445
1446 return 0;
1447 }
1448
1449 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1450 {
1451 struct oob_data *data, *n;
1452
1453 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1454 list_del(&data->list);
1455 kfree(data);
1456 }
1457
1458 return 0;
1459 }
1460
1461 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1462 u8 *randomizer)
1463 {
1464 struct oob_data *data;
1465
1466 data = hci_find_remote_oob_data(hdev, bdaddr);
1467
1468 if (!data) {
1469 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1470 if (!data)
1471 return -ENOMEM;
1472
1473 bacpy(&data->bdaddr, bdaddr);
1474 list_add(&data->list, &hdev->remote_oob_data);
1475 }
1476
1477 memcpy(data->hash, hash, sizeof(data->hash));
1478 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1479
1480 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1481
1482 return 0;
1483 }
1484
1485 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1486 {
1487 struct bdaddr_list *b;
1488
1489 list_for_each_entry(b, &hdev->blacklist, list)
1490 if (bacmp(bdaddr, &b->bdaddr) == 0)
1491 return b;
1492
1493 return NULL;
1494 }
1495
1496 int hci_blacklist_clear(struct hci_dev *hdev)
1497 {
1498 struct list_head *p, *n;
1499
1500 list_for_each_safe(p, n, &hdev->blacklist) {
1501 struct bdaddr_list *b;
1502
1503 b = list_entry(p, struct bdaddr_list, list);
1504
1505 list_del(p);
1506 kfree(b);
1507 }
1508
1509 return 0;
1510 }
1511
1512 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1513 {
1514 struct bdaddr_list *entry;
1515
1516 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1517 return -EBADF;
1518
1519 if (hci_blacklist_lookup(hdev, bdaddr))
1520 return -EEXIST;
1521
1522 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1523 if (!entry)
1524 return -ENOMEM;
1525
1526 bacpy(&entry->bdaddr, bdaddr);
1527
1528 list_add(&entry->list, &hdev->blacklist);
1529
1530 return mgmt_device_blocked(hdev, bdaddr, type);
1531 }
1532
1533 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1534 {
1535 struct bdaddr_list *entry;
1536
1537 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1538 return hci_blacklist_clear(hdev);
1539
1540 entry = hci_blacklist_lookup(hdev, bdaddr);
1541 if (!entry)
1542 return -ENOENT;
1543
1544 list_del(&entry->list);
1545 kfree(entry);
1546
1547 return mgmt_device_unblocked(hdev, bdaddr, type);
1548 }
1549
1550 static void hci_clear_adv_cache(struct work_struct *work)
1551 {
1552 struct hci_dev *hdev = container_of(work, struct hci_dev,
1553 adv_work.work);
1554
1555 hci_dev_lock(hdev);
1556
1557 hci_adv_entries_clear(hdev);
1558
1559 hci_dev_unlock(hdev);
1560 }
1561
1562 int hci_adv_entries_clear(struct hci_dev *hdev)
1563 {
1564 struct adv_entry *entry, *tmp;
1565
1566 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1567 list_del(&entry->list);
1568 kfree(entry);
1569 }
1570
1571 BT_DBG("%s adv cache cleared", hdev->name);
1572
1573 return 0;
1574 }
1575
1576 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1577 {
1578 struct adv_entry *entry;
1579
1580 list_for_each_entry(entry, &hdev->adv_entries, list)
1581 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1582 return entry;
1583
1584 return NULL;
1585 }
1586
1587 static inline int is_connectable_adv(u8 evt_type)
1588 {
1589 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1590 return 1;
1591
1592 return 0;
1593 }
1594
1595 int hci_add_adv_entry(struct hci_dev *hdev,
1596 struct hci_ev_le_advertising_info *ev)
{
struct adv_entry *entry;

if (!is_connectable_adv(ev->evt_type))
1597 return -EINVAL;
1598
1599 /* Only new entries should be added to adv_entries. So, if
1600 * bdaddr was found, don't add it. */
1601 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1602 return 0;
1603
1604 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1605 if (!entry)
1606 return -ENOMEM;
1607
1608 bacpy(&entry->bdaddr, &ev->bdaddr);
1609 entry->bdaddr_type = ev->bdaddr_type;
1610
1611 list_add(&entry->list, &hdev->adv_entries);
1612
1613 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1614 batostr(&entry->bdaddr), entry->bdaddr_type);
1615
1616 return 0;
1617 }
1618
1619 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1620 {
1621 struct le_scan_params *param = (struct le_scan_params *) opt;
1622 struct hci_cp_le_set_scan_param cp;
1623
1624 memset(&cp, 0, sizeof(cp));
1625 cp.type = param->type;
1626 cp.interval = cpu_to_le16(param->interval);
1627 cp.window = cpu_to_le16(param->window);
1628
1629 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1630 }
1631
1632 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1633 {
1634 struct hci_cp_le_set_scan_enable cp;
1635
1636 memset(&cp, 0, sizeof(cp));
1637 cp.enable = 1;
1638
1639 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1640 }
1641
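/* Program the LE scan parameters, enable scanning and schedule the
 * le_scan_disable work to stop the scan after the requested timeout. */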
1642 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1643 u16 window, int timeout)
1644 {
1645 long timeo = msecs_to_jiffies(3000);
1646 struct le_scan_params param;
1647 int err;
1648
1649 BT_DBG("%s", hdev->name);
1650
1651 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1652 return -EINPROGRESS;
1653
1654 param.type = type;
1655 param.interval = interval;
1656 param.window = window;
1657
1658 hci_req_lock(hdev);
1659
1660 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1661 timeo);
1662 if (!err)
1663 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1664
1665 hci_req_unlock(hdev);
1666
1667 if (err < 0)
1668 return err;
1669
1670 schedule_delayed_work(&hdev->le_scan_disable,
1671 msecs_to_jiffies(timeout));
1672
1673 return 0;
1674 }
1675
1676 int hci_cancel_le_scan(struct hci_dev *hdev)
1677 {
1678 BT_DBG("%s", hdev->name);
1679
1680 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1681 return -EALREADY;
1682
1683 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1684 struct hci_cp_le_set_scan_enable cp;
1685
1686 /* Send HCI command to disable LE Scan */
1687 memset(&cp, 0, sizeof(cp));
1688 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1689 }
1690
1691 return 0;
1692 }
1693
1694 static void le_scan_disable_work(struct work_struct *work)
1695 {
1696 struct hci_dev *hdev = container_of(work, struct hci_dev,
1697 le_scan_disable.work);
1698 struct hci_cp_le_set_scan_enable cp;
1699
1700 BT_DBG("%s", hdev->name);
1701
1702 memset(&cp, 0, sizeof(cp));
1703
1704 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1705 }
1706
1707 static void le_scan_work(struct work_struct *work)
1708 {
1709 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1710 struct le_scan_params *param = &hdev->le_scan_params;
1711
1712 BT_DBG("%s", hdev->name);
1713
1714 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1715 param->timeout);
1716 }
1717
1718 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1719 int timeout)
1720 {
1721 struct le_scan_params *param = &hdev->le_scan_params;
1722
1723 BT_DBG("%s", hdev->name);
1724
1725 if (work_busy(&hdev->le_scan))
1726 return -EINPROGRESS;
1727
1728 param->type = type;
1729 param->interval = interval;
1730 param->window = window;
1731 param->timeout = timeout;
1732
1733 queue_work(system_long_wq, &hdev->le_scan);
1734
1735 return 0;
1736 }
1737
1738 /* Register HCI device */
1739 int hci_register_dev(struct hci_dev *hdev)
1740 {
1741 struct list_head *head = &hci_dev_list, *p;
1742 int i, id, error;
1743
1744 if (!hdev->open || !hdev->close)
1745 return -EINVAL;
1746
1747 /* Do not allow HCI_AMP devices to register at index 0,
1748 * so the index can be used as the AMP controller ID.
1749 */
1750 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1751
1752 write_lock(&hci_dev_list_lock);
1753
1754 /* Find first available device id */
1755 list_for_each(p, &hci_dev_list) {
1756 if (list_entry(p, struct hci_dev, list)->id != id)
1757 break;
1758 head = p; id++;
1759 }
1760
1761 sprintf(hdev->name, "hci%d", id);
1762 hdev->id = id;
1763
1764 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1765
1766 list_add_tail(&hdev->list, head);
1767
1768 mutex_init(&hdev->lock);
1769
1770 hdev->flags = 0;
1771 hdev->dev_flags = 0;
1772 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1773 hdev->esco_type = (ESCO_HV1);
1774 hdev->link_mode = (HCI_LM_ACCEPT);
1775 hdev->io_capability = 0x03; /* No Input No Output */
1776
1777 hdev->idle_timeout = 0;
1778 hdev->sniff_max_interval = 800;
1779 hdev->sniff_min_interval = 80;
1780
1781 INIT_WORK(&hdev->rx_work, hci_rx_work);
1782 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1783 INIT_WORK(&hdev->tx_work, hci_tx_work);
1784
1785
1786 skb_queue_head_init(&hdev->rx_q);
1787 skb_queue_head_init(&hdev->cmd_q);
1788 skb_queue_head_init(&hdev->raw_q);
1789
1790 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1791
1792 for (i = 0; i < NUM_REASSEMBLY; i++)
1793 hdev->reassembly[i] = NULL;
1794
1795 init_waitqueue_head(&hdev->req_wait_q);
1796 mutex_init(&hdev->req_lock);
1797
1798 discovery_init(hdev);
1799
1800 hci_conn_hash_init(hdev);
1801
1802 INIT_LIST_HEAD(&hdev->mgmt_pending);
1803
1804 INIT_LIST_HEAD(&hdev->blacklist);
1805
1806 INIT_LIST_HEAD(&hdev->uuids);
1807
1808 INIT_LIST_HEAD(&hdev->link_keys);
1809 INIT_LIST_HEAD(&hdev->long_term_keys);
1810
1811 INIT_LIST_HEAD(&hdev->remote_oob_data);
1812
1813 INIT_LIST_HEAD(&hdev->adv_entries);
1814
1815 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1816 INIT_WORK(&hdev->power_on, hci_power_on);
1817 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1818
1819 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1820
1821 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1822
1823 atomic_set(&hdev->promisc, 0);
1824
1825 INIT_WORK(&hdev->le_scan, le_scan_work);
1826
1827 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1828
1829 write_unlock(&hci_dev_list_lock);
1830
1831 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1832 WQ_MEM_RECLAIM, 1);
1833 if (!hdev->workqueue) {
1834 error = -ENOMEM;
1835 goto err;
1836 }
1837
1838 error = hci_add_sysfs(hdev);
1839 if (error < 0)
1840 goto err_wqueue;
1841
1842 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1843 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1844 if (hdev->rfkill) {
1845 if (rfkill_register(hdev->rfkill) < 0) {
1846 rfkill_destroy(hdev->rfkill);
1847 hdev->rfkill = NULL;
1848 }
1849 }
1850
1851 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1852 set_bit(HCI_SETUP, &hdev->dev_flags);
1853 schedule_work(&hdev->power_on);
1854
1855 hci_notify(hdev, HCI_DEV_REG);
1856 hci_dev_hold(hdev);
1857
1858 return id;
1859
1860 err_wqueue:
1861 destroy_workqueue(hdev->workqueue);
1862 err:
1863 write_lock(&hci_dev_list_lock);
1864 list_del(&hdev->list);
1865 write_unlock(&hci_dev_list_lock);
1866
1867 return error;
1868 }
1869 EXPORT_SYMBOL(hci_register_dev);
1870
1871 /* Unregister HCI device */
1872 void hci_unregister_dev(struct hci_dev *hdev)
1873 {
1874 int i;
1875
1876 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1877
1878 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1879
1880 write_lock(&hci_dev_list_lock);
1881 list_del(&hdev->list);
1882 write_unlock(&hci_dev_list_lock);
1883
1884 hci_dev_do_close(hdev);
1885
1886 for (i = 0; i < NUM_REASSEMBLY; i++)
1887 kfree_skb(hdev->reassembly[i]);
1888
1889 if (!test_bit(HCI_INIT, &hdev->flags) &&
1890 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1891 hci_dev_lock(hdev);
1892 mgmt_index_removed(hdev);
1893 hci_dev_unlock(hdev);
1894 }
1895
1896 /* mgmt_index_removed should take care of emptying the
1897 * pending list */
1898 BUG_ON(!list_empty(&hdev->mgmt_pending));
1899
1900 hci_notify(hdev, HCI_DEV_UNREG);
1901
1902 if (hdev->rfkill) {
1903 rfkill_unregister(hdev->rfkill);
1904 rfkill_destroy(hdev->rfkill);
1905 }
1906
1907 hci_del_sysfs(hdev);
1908
1909 cancel_delayed_work_sync(&hdev->adv_work);
1910
1911 destroy_workqueue(hdev->workqueue);
1912
1913 hci_dev_lock(hdev);
1914 hci_blacklist_clear(hdev);
1915 hci_uuids_clear(hdev);
1916 hci_link_keys_clear(hdev);
1917 hci_smp_ltks_clear(hdev);
1918 hci_remote_oob_data_clear(hdev);
1919 hci_adv_entries_clear(hdev);
1920 hci_dev_unlock(hdev);
1921
1922 hci_dev_put(hdev);
1923 }
1924 EXPORT_SYMBOL(hci_unregister_dev);
1925
1926 /* Suspend HCI device */
1927 int hci_suspend_dev(struct hci_dev *hdev)
1928 {
1929 hci_notify(hdev, HCI_DEV_SUSPEND);
1930 return 0;
1931 }
1932 EXPORT_SYMBOL(hci_suspend_dev);
1933
1934 /* Resume HCI device */
1935 int hci_resume_dev(struct hci_dev *hdev)
1936 {
1937 hci_notify(hdev, HCI_DEV_RESUME);
1938 return 0;
1939 }
1940 EXPORT_SYMBOL(hci_resume_dev);
1941
1942 /* Receive frame from HCI drivers */
1943 int hci_recv_frame(struct sk_buff *skb)
1944 {
1945 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1946 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1947 && !test_bit(HCI_INIT, &hdev->flags))) {
1948 kfree_skb(skb);
1949 return -ENXIO;
1950 }
1951
1952 /* Incoming skb */
1953 bt_cb(skb)->incoming = 1;
1954
1955 /* Time stamp */
1956 __net_timestamp(skb);
1957
1958 skb_queue_tail(&hdev->rx_q, skb);
1959 queue_work(hdev->workqueue, &hdev->rx_work);
1960
1961 return 0;
1962 }
1963 EXPORT_SYMBOL(hci_recv_frame);
1964
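/* Collect fragments of a single HCI packet into hdev->reassembly[index].
 * Once the expected number of bytes has arrived, the completed frame is
 * handed to hci_recv_frame(). Returns the number of bytes left
 * unconsumed from this fragment, or a negative error code. */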
1965 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1966 int count, __u8 index)
1967 {
1968 int len = 0;
1969 int hlen = 0;
1970 int remain = count;
1971 struct sk_buff *skb;
1972 struct bt_skb_cb *scb;
1973
1974 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1975 index >= NUM_REASSEMBLY)
1976 return -EILSEQ;
1977
1978 skb = hdev->reassembly[index];
1979
1980 if (!skb) {
1981 switch (type) {
1982 case HCI_ACLDATA_PKT:
1983 len = HCI_MAX_FRAME_SIZE;
1984 hlen = HCI_ACL_HDR_SIZE;
1985 break;
1986 case HCI_EVENT_PKT:
1987 len = HCI_MAX_EVENT_SIZE;
1988 hlen = HCI_EVENT_HDR_SIZE;
1989 break;
1990 case HCI_SCODATA_PKT:
1991 len = HCI_MAX_SCO_SIZE;
1992 hlen = HCI_SCO_HDR_SIZE;
1993 break;
1994 }
1995
1996 skb = bt_skb_alloc(len, GFP_ATOMIC);
1997 if (!skb)
1998 return -ENOMEM;
1999
2000 scb = (void *) skb->cb;
2001 scb->expect = hlen;
2002 scb->pkt_type = type;
2003
2004 skb->dev = (void *) hdev;
2005 hdev->reassembly[index] = skb;
2006 }
2007
2008 while (count) {
2009 scb = (void *) skb->cb;
2010 len = min_t(uint, scb->expect, count);
2011
2012 memcpy(skb_put(skb, len), data, len);
2013
2014 count -= len;
2015 data += len;
2016 scb->expect -= len;
2017 remain = count;
2018
2019 switch (type) {
2020 case HCI_EVENT_PKT:
2021 if (skb->len == HCI_EVENT_HDR_SIZE) {
2022 struct hci_event_hdr *h = hci_event_hdr(skb);
2023 scb->expect = h->plen;
2024
2025 if (skb_tailroom(skb) < scb->expect) {
2026 kfree_skb(skb);
2027 hdev->reassembly[index] = NULL;
2028 return -ENOMEM;
2029 }
2030 }
2031 break;
2032
2033 case HCI_ACLDATA_PKT:
2034 if (skb->len == HCI_ACL_HDR_SIZE) {
2035 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2036 scb->expect = __le16_to_cpu(h->dlen);
2037
2038 if (skb_tailroom(skb) < scb->expect) {
2039 kfree_skb(skb);
2040 hdev->reassembly[index] = NULL;
2041 return -ENOMEM;
2042 }
2043 }
2044 break;
2045
2046 case HCI_SCODATA_PKT:
2047 if (skb->len == HCI_SCO_HDR_SIZE) {
2048 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2049 scb->expect = h->dlen;
2050
2051 if (skb_tailroom(skb) < scb->expect) {
2052 kfree_skb(skb);
2053 hdev->reassembly[index] = NULL;
2054 return -ENOMEM;
2055 }
2056 }
2057 break;
2058 }
2059
2060 if (scb->expect == 0) {
2061 /* Complete frame */
2062
2063 bt_cb(skb)->pkt_type = type;
2064 hci_recv_frame(skb);
2065
2066 hdev->reassembly[index] = NULL;
2067 return remain;
2068 }
2069 }
2070
2071 return remain;
2072 }
2073
2074 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2075 {
2076 int rem = 0;
2077
2078 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2079 return -EILSEQ;
2080
2081 while (count) {
2082 rem = hci_reassembly(hdev, type, data, count, type - 1);
2083 if (rem < 0)
2084 return rem;
2085
2086 data += (count - rem);
2087 count = rem;
2088 }
2089
2090 return rem;
2091 }
2092 EXPORT_SYMBOL(hci_recv_fragment);
2093
2094 #define STREAM_REASSEMBLY 0
2095
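/* Reassemble HCI packets from a raw byte stream (e.g. a UART transport):
 * the first byte of each frame carries the packet type, the rest is fed
 * through hci_reassembly() using the single stream reassembly slot. */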
2096 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2097 {
2098 int type;
2099 int rem = 0;
2100
2101 while (count) {
2102 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2103
2104 if (!skb) {
2105 struct { char type; } *pkt;
2106
2107 /* Start of the frame */
2108 pkt = data;
2109 type = pkt->type;
2110
2111 data++;
2112 count--;
2113 } else
2114 type = bt_cb(skb)->pkt_type;
2115
2116 rem = hci_reassembly(hdev, type, data, count,
2117 STREAM_REASSEMBLY);
2118 if (rem < 0)
2119 return rem;
2120
2121 data += (count - rem);
2122 count = rem;
2123 }
2124
2125 return rem;
2126 }
2127 EXPORT_SYMBOL(hci_recv_stream_fragment);
2128
2129 /* ---- Interface to upper protocols ---- */
2130
2131 int hci_register_cb(struct hci_cb *cb)
2132 {
2133 BT_DBG("%p name %s", cb, cb->name);
2134
2135 write_lock(&hci_cb_list_lock);
2136 list_add(&cb->list, &hci_cb_list);
2137 write_unlock(&hci_cb_list_lock);
2138
2139 return 0;
2140 }
2141 EXPORT_SYMBOL(hci_register_cb);
2142
2143 int hci_unregister_cb(struct hci_cb *cb)
2144 {
2145 BT_DBG("%p name %s", cb, cb->name);
2146
2147 write_lock(&hci_cb_list_lock);
2148 list_del(&cb->list);
2149 write_unlock(&hci_cb_list_lock);
2150
2151 return 0;
2152 }
2153 EXPORT_SYMBOL(hci_unregister_cb);
2154
2155 static int hci_send_frame(struct sk_buff *skb)
2156 {
2157 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2158
2159 if (!hdev) {
2160 kfree_skb(skb);
2161 return -ENODEV;
2162 }
2163
2164 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2165
2166 /* Time stamp */
2167 __net_timestamp(skb);
2168
2169 /* Send copy to monitor */
2170 hci_send_to_monitor(hdev, skb);
2171
2172 if (atomic_read(&hdev->promisc)) {
2173 /* Send copy to the sockets */
2174 hci_send_to_sock(hdev, skb);
2175 }
2176
2177 /* Get rid of skb owner, prior to sending to the driver. */
2178 skb_orphan(skb);
2179
2180 return hdev->send(skb);
2181 }
2182
2183 /* Send HCI command */
2184 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2185 {
2186 int len = HCI_COMMAND_HDR_SIZE + plen;
2187 struct hci_command_hdr *hdr;
2188 struct sk_buff *skb;
2189
2190 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2191
2192 skb = bt_skb_alloc(len, GFP_ATOMIC);
2193 if (!skb) {
2194 BT_ERR("%s no memory for command", hdev->name);
2195 return -ENOMEM;
2196 }
2197
2198 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2199 hdr->opcode = cpu_to_le16(opcode);
2200 hdr->plen = plen;
2201
2202 if (plen)
2203 memcpy(skb_put(skb, plen), param, plen);
2204
2205 BT_DBG("skb len %d", skb->len);
2206
2207 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2208 skb->dev = (void *) hdev;
2209
2210 if (test_bit(HCI_INIT, &hdev->flags))
2211 hdev->init_last_cmd = opcode;
2212
2213 skb_queue_tail(&hdev->cmd_q, skb);
2214 queue_work(hdev->workqueue, &hdev->cmd_work);
2215
2216 return 0;
2217 }
2218
2219 /* Get data from the previously sent command */
2220 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2221 {
2222 struct hci_command_hdr *hdr;
2223
2224 if (!hdev->sent_cmd)
2225 return NULL;
2226
2227 hdr = (void *) hdev->sent_cmd->data;
2228
2229 if (hdr->opcode != cpu_to_le16(opcode))
2230 return NULL;
2231
2232 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2233
2234 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2235 }
2236
2237 /* Send ACL data */
2238 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2239 {
2240 struct hci_acl_hdr *hdr;
2241 int len = skb->len;
2242
2243 skb_push(skb, HCI_ACL_HDR_SIZE);
2244 skb_reset_transport_header(skb);
2245 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2246 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2247 hdr->dlen = cpu_to_le16(len);
2248 }
2249
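/* Queue an ACL frame for transmission; if the skb carries a frag_list,
 * all fragments are queued atomically with the ACL_CONT flag set. */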
2250 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2251 struct sk_buff *skb, __u16 flags)
2252 {
2253 struct hci_dev *hdev = conn->hdev;
2254 struct sk_buff *list;
2255
2256 list = skb_shinfo(skb)->frag_list;
2257 if (!list) {
2258 /* Non fragmented */
2259 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2260
2261 skb_queue_tail(queue, skb);
2262 } else {
2263 /* Fragmented */
2264 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2265
2266 skb_shinfo(skb)->frag_list = NULL;
2267
2268 /* Queue all fragments atomically */
2269 spin_lock(&queue->lock);
2270
2271 __skb_queue_tail(queue, skb);
2272
2273 flags &= ~ACL_START;
2274 flags |= ACL_CONT;
2275 do {
2276 skb = list; list = list->next;
2277
2278 skb->dev = (void *) hdev;
2279 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2280 hci_add_acl_hdr(skb, conn->handle, flags);
2281
2282 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2283
2284 __skb_queue_tail(queue, skb);
2285 } while (list);
2286
2287 spin_unlock(&queue->lock);
2288 }
2289 }
2290
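/* Send ACL data on a channel: stamp the ACL header on the skb, hand it
 * to hci_queue_acl() for (possibly fragmented) queueing and schedule
 * the TX work.
 */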
2291 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2292 {
2293 struct hci_conn *conn = chan->conn;
2294 struct hci_dev *hdev = conn->hdev;
2295
2296 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2297
2298 skb->dev = (void *) hdev;
2299 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2300 hci_add_acl_hdr(skb, conn->handle, flags);
2301
2302 hci_queue_acl(conn, &chan->data_q, skb, flags);
2303
2304 queue_work(hdev->workqueue, &hdev->tx_work);
2305 }
2306 EXPORT_SYMBOL(hci_send_acl);
2307
2308 /* Send SCO data */
2309 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2310 {
2311 struct hci_dev *hdev = conn->hdev;
2312 struct hci_sco_hdr hdr;
2313
2314 BT_DBG("%s len %d", hdev->name, skb->len);
2315
2316 hdr.handle = cpu_to_le16(conn->handle);
2317 hdr.dlen = skb->len;
2318
2319 skb_push(skb, HCI_SCO_HDR_SIZE);
2320 skb_reset_transport_header(skb);
2321 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2322
2323 skb->dev = (void *) hdev;
2324 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2325
2326 skb_queue_tail(&conn->data_q, skb);
2327 queue_work(hdev->workqueue, &hdev->tx_work);
2328 }
2329 EXPORT_SYMBOL(hci_send_sco);
2330
2331 /* ---- HCI TX task (outgoing data) ---- */
2332
2333 /* HCI Connection scheduler */
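/* Pick the connection of the given type that has queued data and the
 * fewest packets in flight, and derive its quote from the controller's
 * free buffer count divided by the number of candidates (minimum 1).
 */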
2334 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2335 {
2336 struct hci_conn_hash *h = &hdev->conn_hash;
2337 struct hci_conn *conn = NULL, *c;
2338 unsigned int num = 0, min = ~0;
2339
2340 /* We don't have to lock device here. Connections are always
2341 * added and removed with TX task disabled. */
2342
2343 rcu_read_lock();
2344
2345 list_for_each_entry_rcu(c, &h->list, list) {
2346 if (c->type != type || skb_queue_empty(&c->data_q))
2347 continue;
2348
2349 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2350 continue;
2351
2352 num++;
2353
2354 if (c->sent < min) {
2355 min = c->sent;
2356 conn = c;
2357 }
2358
2359 if (hci_conn_num(hdev, type) == num)
2360 break;
2361 }
2362
2363 rcu_read_unlock();
2364
2365 if (conn) {
2366 int cnt, q;
2367
2368 switch (conn->type) {
2369 case ACL_LINK:
2370 cnt = hdev->acl_cnt;
2371 break;
2372 case SCO_LINK:
2373 case ESCO_LINK:
2374 cnt = hdev->sco_cnt;
2375 break;
2376 case LE_LINK:
2377 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2378 break;
2379 default:
2380 cnt = 0;
2381 BT_ERR("Unknown link type");
2382 }
2383
2384 q = cnt / num;
2385 *quote = q ? q : 1;
2386 } else
2387 *quote = 0;
2388
2389 BT_DBG("conn %p quote %d", conn, *quote);
2390 return conn;
2391 }
2392
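/* TX timed out for this link type: disconnect every connection of that
 * type that still has unacknowledged packets (reason 0x13, remote user
 * terminated connection).
 */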
2393 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2394 {
2395 struct hci_conn_hash *h = &hdev->conn_hash;
2396 struct hci_conn *c;
2397
2398 BT_ERR("%s link tx timeout", hdev->name);
2399
2400 rcu_read_lock();
2401
2402 /* Kill stalled connections */
2403 list_for_each_entry_rcu(c, &h->list, list) {
2404 if (c->type == type && c->sent) {
2405 BT_ERR("%s killing stalled connection %s",
2406 hdev->name, batostr(&c->dst));
2407 hci_acl_disconn(c, 0x13);
2408 }
2409 }
2410
2411 rcu_read_unlock();
2412 }
2413
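/* Channel-level scheduler: among connections of the given type, pick
 * the queued channel with the highest skb priority, breaking ties in
 * favour of the connection with the fewest packets in flight, and
 * compute its quote from the free buffer count as in hci_low_sent().
 */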
2414 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2415 int *quote)
2416 {
2417 struct hci_conn_hash *h = &hdev->conn_hash;
2418 struct hci_chan *chan = NULL;
2419 unsigned int num = 0, min = ~0, cur_prio = 0;
2420 struct hci_conn *conn;
2421 int cnt, q, conn_num = 0;
2422
2423 BT_DBG("%s", hdev->name);
2424
2425 rcu_read_lock();
2426
2427 list_for_each_entry_rcu(conn, &h->list, list) {
2428 struct hci_chan *tmp;
2429
2430 if (conn->type != type)
2431 continue;
2432
2433 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2434 continue;
2435
2436 conn_num++;
2437
2438 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2439 struct sk_buff *skb;
2440
2441 if (skb_queue_empty(&tmp->data_q))
2442 continue;
2443
2444 skb = skb_peek(&tmp->data_q);
2445 if (skb->priority < cur_prio)
2446 continue;
2447
2448 if (skb->priority > cur_prio) {
2449 num = 0;
2450 min = ~0;
2451 cur_prio = skb->priority;
2452 }
2453
2454 num++;
2455
2456 if (conn->sent < min) {
2457 min = conn->sent;
2458 chan = tmp;
2459 }
2460 }
2461
2462 if (hci_conn_num(hdev, type) == conn_num)
2463 break;
2464 }
2465
2466 rcu_read_unlock();
2467
2468 if (!chan)
2469 return NULL;
2470
2471 switch (chan->conn->type) {
2472 case ACL_LINK:
2473 cnt = hdev->acl_cnt;
2474 break;
2475 case SCO_LINK:
2476 case ESCO_LINK:
2477 cnt = hdev->sco_cnt;
2478 break;
2479 case LE_LINK:
2480 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2481 break;
2482 default:
2483 cnt = 0;
2484 BT_ERR("Unknown link type");
2485 }
2486
2487 q = cnt / num;
2488 *quote = q ? q : 1;
2489 BT_DBG("chan %p quote %d", chan, *quote);
2490 return chan;
2491 }
2492
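/* After a scheduling round, raise the priority of skbs on channels that
 * did not get to send anything to HCI_PRIO_MAX - 1 so they are not
 * starved by busier, higher-priority channels.
 */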
2493 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2494 {
2495 struct hci_conn_hash *h = &hdev->conn_hash;
2496 struct hci_conn *conn;
2497 int num = 0;
2498
2499 BT_DBG("%s", hdev->name);
2500
2501 rcu_read_lock();
2502
2503 list_for_each_entry_rcu(conn, &h->list, list) {
2504 struct hci_chan *chan;
2505
2506 if (conn->type != type)
2507 continue;
2508
2509 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2510 continue;
2511
2512 num++;
2513
2514 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2515 struct sk_buff *skb;
2516
2517 if (chan->sent) {
2518 chan->sent = 0;
2519 continue;
2520 }
2521
2522 if (skb_queue_empty(&chan->data_q))
2523 continue;
2524
2525 skb = skb_peek(&chan->data_q);
2526 if (skb->priority >= HCI_PRIO_MAX - 1)
2527 continue;
2528
2529 skb->priority = HCI_PRIO_MAX - 1;
2530
2531 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2532 skb->priority);
2533 }
2534
2535 if (hci_conn_num(hdev, type) == num)
2536 break;
2537 }
2538
2539 rcu_read_unlock();
2540
2541 }
2542
2543 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2544 {
2545 /* Calculate count of blocks used by this packet */
2546 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2547 }
2548
2549 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2550 {
2551 if (!test_bit(HCI_RAW, &hdev->flags)) {
2552 /* ACL tx timeout must be longer than maximum
2553 * link supervision timeout (40.9 seconds) */
2554 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2555 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2556 hci_link_tx_to(hdev, ACL_LINK);
2557 }
2558 }
2559
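/* ACL scheduling for packet-based flow control: dequeue up to "quote"
 * equal-priority skbs per channel while the controller still has free
 * ACL buffers (acl_cnt).
 */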
2560 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2561 {
2562 unsigned int cnt = hdev->acl_cnt;
2563 struct hci_chan *chan;
2564 struct sk_buff *skb;
2565 int quote;
2566
2567 __check_timeout(hdev, cnt);
2568
2569 while (hdev->acl_cnt &&
2570 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2571 u32 priority = (skb_peek(&chan->data_q))->priority;
2572 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2573 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2574 skb->len, skb->priority);
2575
2576 /* Stop if priority has changed */
2577 if (skb->priority < priority)
2578 break;
2579
2580 skb = skb_dequeue(&chan->data_q);
2581
2582 hci_conn_enter_active_mode(chan->conn,
2583 bt_cb(skb)->force_active);
2584
2585 hci_send_frame(skb);
2586 hdev->acl_last_tx = jiffies;
2587
2588 hdev->acl_cnt--;
2589 chan->sent++;
2590 chan->conn->sent++;
2591 }
2592 }
2593
2594 if (cnt != hdev->acl_cnt)
2595 hci_prio_recalculate(hdev, ACL_LINK);
2596 }
2597
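/* ACL scheduling for block-based flow control: same idea as the
 * packet-based path, except each skb consumes __get_blocks() data
 * blocks from block_cnt instead of a single buffer slot.
 */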
2598 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2599 {
2600 unsigned int cnt = hdev->block_cnt;
2601 struct hci_chan *chan;
2602 struct sk_buff *skb;
2603 int quote;
2604
2605 __check_timeout(hdev, cnt);
2606
2607 while (hdev->block_cnt > 0 &&
2608 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2609 u32 priority = (skb_peek(&chan->data_q))->priority;
2610 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2611 int blocks;
2612
2613 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2614 skb->len, skb->priority);
2615
2616 /* Stop if priority has changed */
2617 if (skb->priority < priority)
2618 break;
2619
2620 skb = skb_dequeue(&chan->data_q);
2621
2622 blocks = __get_blocks(hdev, skb);
2623 if (blocks > hdev->block_cnt)
2624 return;
2625
2626 hci_conn_enter_active_mode(chan->conn,
2627 bt_cb(skb)->force_active);
2628
2629 hci_send_frame(skb);
2630 hdev->acl_last_tx = jiffies;
2631
2632 hdev->block_cnt -= blocks;
2633 quote -= blocks;
2634
2635 chan->sent += blocks;
2636 chan->conn->sent += blocks;
2637 }
2638 }
2639
2640 if (cnt != hdev->block_cnt)
2641 hci_prio_recalculate(hdev, ACL_LINK);
2642 }
2643
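/* Dispatch to the packet-based or block-based ACL scheduler depending
 * on the controller's flow control mode.
 */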
2644 static inline void hci_sched_acl(struct hci_dev *hdev)
2645 {
2646 BT_DBG("%s", hdev->name);
2647
2648 if (!hci_conn_num(hdev, ACL_LINK))
2649 return;
2650
2651 switch (hdev->flow_ctl_mode) {
2652 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2653 hci_sched_acl_pkt(hdev);
2654 break;
2655
2656 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2657 hci_sched_acl_blk(hdev);
2658 break;
2659 }
2660 }
2661
2662 /* Schedule SCO */
2663 static inline void hci_sched_sco(struct hci_dev *hdev)
2664 {
2665 struct hci_conn *conn;
2666 struct sk_buff *skb;
2667 int quote;
2668
2669 BT_DBG("%s", hdev->name);
2670
2671 if (!hci_conn_num(hdev, SCO_LINK))
2672 return;
2673
2674 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2675 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2676 BT_DBG("skb %p len %d", skb, skb->len);
2677 hci_send_frame(skb);
2678
2679 conn->sent++;
2680 if (conn->sent == ~0)
2681 conn->sent = 0;
2682 }
2683 }
2684 }
2685
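/* Schedule eSCO */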
2686 static inline void hci_sched_esco(struct hci_dev *hdev)
2687 {
2688 struct hci_conn *conn;
2689 struct sk_buff *skb;
2690 int quote;
2691
2692 BT_DBG("%s", hdev->name);
2693
2694 if (!hci_conn_num(hdev, ESCO_LINK))
2695 return;
2696
2697 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2698 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2699 BT_DBG("skb %p len %d", skb, skb->len);
2700 hci_send_frame(skb);
2701
2702 conn->sent++;
2703 if (conn->sent == ~0)
2704 conn->sent = 0;
2705 }
2706 }
2707 }
2708
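/* Schedule LE traffic. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL buffers, so the remaining count is
 * written back to either le_cnt or acl_cnt accordingly.
 */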
2709 static inline void hci_sched_le(struct hci_dev *hdev)
2710 {
2711 struct hci_chan *chan;
2712 struct sk_buff *skb;
2713 int quote, cnt, tmp;
2714
2715 BT_DBG("%s", hdev->name);
2716
2717 if (!hci_conn_num(hdev, LE_LINK))
2718 return;
2719
2720 if (!test_bit(HCI_RAW, &hdev->flags)) {
2721 /* LE tx timeout must be longer than maximum
2722 * link supervision timeout (40.9 seconds) */
2723 if (!hdev->le_cnt && hdev->le_pkts &&
2724 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2725 hci_link_tx_to(hdev, LE_LINK);
2726 }
2727
2728 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2729 tmp = cnt;
2730 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2731 u32 priority = (skb_peek(&chan->data_q))->priority;
2732 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2733 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2734 skb->len, skb->priority);
2735
2736 /* Stop if priority has changed */
2737 if (skb->priority < priority)
2738 break;
2739
2740 skb = skb_dequeue(&chan->data_q);
2741
2742 hci_send_frame(skb);
2743 hdev->le_last_tx = jiffies;
2744
2745 cnt--;
2746 chan->sent++;
2747 chan->conn->sent++;
2748 }
2749 }
2750
2751 if (hdev->le_pkts)
2752 hdev->le_cnt = cnt;
2753 else
2754 hdev->acl_cnt = cnt;
2755
2756 if (cnt != tmp)
2757 hci_prio_recalculate(hdev, LE_LINK);
2758 }
2759
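/* TX work: run the ACL, SCO, eSCO and LE schedulers, then push any
 * queued raw (unknown type) packets straight to the driver.
 */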
2760 static void hci_tx_work(struct work_struct *work)
2761 {
2762 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2763 struct sk_buff *skb;
2764
2765 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2766 hdev->sco_cnt, hdev->le_cnt);
2767
2768 /* Schedule queues and send stuff to HCI driver */
2769
2770 hci_sched_acl(hdev);
2771
2772 hci_sched_sco(hdev);
2773
2774 hci_sched_esco(hdev);
2775
2776 hci_sched_le(hdev);
2777
2778 /* Send next queued raw (unknown type) packet */
2779 while ((skb = skb_dequeue(&hdev->raw_q)))
2780 hci_send_frame(skb);
2781 }
2782
2783 /* ----- HCI RX task (incoming data processing) ----- */
2784
2785 /* ACL data packet */
2786 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2787 {
2788 struct hci_acl_hdr *hdr = (void *) skb->data;
2789 struct hci_conn *conn;
2790 __u16 handle, flags;
2791
2792 skb_pull(skb, HCI_ACL_HDR_SIZE);
2793
2794 handle = __le16_to_cpu(hdr->handle);
2795 flags = hci_flags(handle);
2796 handle = hci_handle(handle);
2797
2798 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2799
2800 hdev->stat.acl_rx++;
2801
2802 hci_dev_lock(hdev);
2803 conn = hci_conn_hash_lookup_handle(hdev, handle);
2804 hci_dev_unlock(hdev);
2805
2806 if (conn) {
2807 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2808
2809 /* Send to upper protocol */
2810 l2cap_recv_acldata(conn, skb, flags);
2811 return;
2812 } else {
2813 BT_ERR("%s ACL packet for unknown connection handle %d",
2814 hdev->name, handle);
2815 }
2816
2817 kfree_skb(skb);
2818 }
2819
2820 /* SCO data packet */
2821 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2822 {
2823 struct hci_sco_hdr *hdr = (void *) skb->data;
2824 struct hci_conn *conn;
2825 __u16 handle;
2826
2827 skb_pull(skb, HCI_SCO_HDR_SIZE);
2828
2829 handle = __le16_to_cpu(hdr->handle);
2830
2831 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2832
2833 hdev->stat.sco_rx++;
2834
2835 hci_dev_lock(hdev);
2836 conn = hci_conn_hash_lookup_handle(hdev, handle);
2837 hci_dev_unlock(hdev);
2838
2839 if (conn) {
2840 /* Send to upper protocol */
2841 sco_recv_scodata(conn, skb);
2842 return;
2843 } else {
2844 BT_ERR("%s SCO packet for unknown connection handle %d",
2845 hdev->name, handle);
2846 }
2847
2848 kfree_skb(skb);
2849 }
2850
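/* RX work: every received frame is copied to the monitor and, in
 * promiscuous mode, to the sockets. In raw mode frames are then
 * dropped, during init data packets are dropped, otherwise events and
 * data are dispatched to their handlers.
 */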
2851 static void hci_rx_work(struct work_struct *work)
2852 {
2853 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2854 struct sk_buff *skb;
2855
2856 BT_DBG("%s", hdev->name);
2857
2858 while ((skb = skb_dequeue(&hdev->rx_q))) {
2859 /* Send copy to monitor */
2860 hci_send_to_monitor(hdev, skb);
2861
2862 if (atomic_read(&hdev->promisc)) {
2863 /* Send copy to the sockets */
2864 hci_send_to_sock(hdev, skb);
2865 }
2866
2867 if (test_bit(HCI_RAW, &hdev->flags)) {
2868 kfree_skb(skb);
2869 continue;
2870 }
2871
2872 if (test_bit(HCI_INIT, &hdev->flags)) {
2873 			/* Don't process data packets in this state. */
2874 switch (bt_cb(skb)->pkt_type) {
2875 case HCI_ACLDATA_PKT:
2876 case HCI_SCODATA_PKT:
2877 kfree_skb(skb);
2878 continue;
2879 }
2880 }
2881
2882 /* Process frame */
2883 switch (bt_cb(skb)->pkt_type) {
2884 case HCI_EVENT_PKT:
2885 BT_DBG("%s Event packet", hdev->name);
2886 hci_event_packet(hdev, skb);
2887 break;
2888
2889 case HCI_ACLDATA_PKT:
2890 BT_DBG("%s ACL data packet", hdev->name);
2891 hci_acldata_packet(hdev, skb);
2892 break;
2893
2894 case HCI_SCODATA_PKT:
2895 BT_DBG("%s SCO data packet", hdev->name);
2896 hci_scodata_packet(hdev, skb);
2897 break;
2898
2899 default:
2900 kfree_skb(skb);
2901 break;
2902 }
2903 }
2904 }
2905
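/* Command work: when the controller has a free command slot (cmd_cnt),
 * dequeue the next command, keep a clone in sent_cmd for later
 * inspection, send it and rearm the command timeout timer (or delete
 * it while a reset is pending).
 */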
2906 static void hci_cmd_work(struct work_struct *work)
2907 {
2908 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2909 struct sk_buff *skb;
2910
2911 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2912
2913 /* Send queued commands */
2914 if (atomic_read(&hdev->cmd_cnt)) {
2915 skb = skb_dequeue(&hdev->cmd_q);
2916 if (!skb)
2917 return;
2918
2919 kfree_skb(hdev->sent_cmd);
2920
2921 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2922 if (hdev->sent_cmd) {
2923 atomic_dec(&hdev->cmd_cnt);
2924 hci_send_frame(skb);
2925 if (test_bit(HCI_RESET, &hdev->flags))
2926 del_timer(&hdev->cmd_timer);
2927 else
2928 mod_timer(&hdev->cmd_timer,
2929 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2930 } else {
2931 skb_queue_head(&hdev->cmd_q, skb);
2932 queue_work(hdev->workqueue, &hdev->cmd_work);
2933 }
2934 }
2935 }
2936
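/* Start a general inquiry (GIAC) unless one is already in progress;
 * the inquiry cache is flushed before HCI_OP_INQUIRY is sent.
 */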
2937 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2938 {
2939 /* General inquiry access code (GIAC) */
2940 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2941 struct hci_cp_inquiry cp;
2942
2943 BT_DBG("%s", hdev->name);
2944
2945 if (test_bit(HCI_INQUIRY, &hdev->flags))
2946 return -EINPROGRESS;
2947
2948 inquiry_cache_flush(hdev);
2949
2950 memset(&cp, 0, sizeof(cp));
2951 memcpy(&cp.lap, lap, sizeof(cp.lap));
2952 cp.length = length;
2953
2954 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2955 }
2956
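/* Cancel an ongoing inquiry; returns -EALREADY if none is running. */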
2957 int hci_cancel_inquiry(struct hci_dev *hdev)
2958 {
2959 BT_DBG("%s", hdev->name);
2960
2961 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2962 return -EALREADY;
2963
2964 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2965 }