Bluetooth: Clear non-persistent flags when closing HCI device
[mt8127/android_kernel_alcatel_ttab.git] net/bluetooth/hci_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
28#include <linux/export.h>
29#include <linux/idr.h>
30
31#include <linux/rfkill.h>
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
36static void hci_rx_work(struct work_struct *work);
37static void hci_cmd_work(struct work_struct *work);
38static void hci_tx_work(struct work_struct *work);
39
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
51/* ---- HCI notifications ---- */
52
53static void hci_notify(struct hci_dev *hdev, int event)
54{
55 hci_sock_dev_event(hdev, event);
56}
57
58/* ---- HCI requests ---- */
59
60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
61{
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
82/* Execute request and wait for completion. */
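/* The caller-supplied func() queues commands on a hci_request; the
 * request is then run with hci_req_sync_complete() as the completion
 * callback and the caller sleeps on req_wait_q until the request
 * finishes, is cancelled or the timeout expires. Callers must hold
 * hdev->req_lock (see hci_req_sync() below).
 */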
83static int __hci_req_sync(struct hci_dev *hdev,
84 void (*func)(struct hci_request *req,
85 unsigned long opt),
86 unsigned long opt, __u32 timeout)
87{
88 struct hci_request req;
89 DECLARE_WAITQUEUE(wait, current);
90 int err = 0;
91
92 BT_DBG("%s start", hdev->name);
93
94 hci_req_init(&req, hdev);
95
96 hdev->req_status = HCI_REQ_PEND;
97
98 func(&req, opt);
99
100 err = hci_req_run(&req, hci_req_sync_complete);
101 if (err < 0) {
102 hdev->req_status = 0;
103
104 /* ENODATA means the HCI request command queue is empty.
105 * This can happen when a request with conditionals doesn't
106 * trigger any commands to be sent. This is normal behavior
107 * and should not trigger an error return.
108 */
109 if (err == -ENODATA)
110 return 0;
111
112 return err;
113 }
114
115 add_wait_queue(&hdev->req_wait_q, &wait);
116 set_current_state(TASK_INTERRUPTIBLE);
117
118 schedule_timeout(timeout);
119
120 remove_wait_queue(&hdev->req_wait_q, &wait);
121
122 if (signal_pending(current))
123 return -EINTR;
124
125 switch (hdev->req_status) {
126 case HCI_REQ_DONE:
127 err = -bt_to_errno(hdev->req_result);
128 break;
129
130 case HCI_REQ_CANCELED:
131 err = -hdev->req_result;
132 break;
133
134 default:
135 err = -ETIMEDOUT;
136 break;
137 }
138
139 hdev->req_status = hdev->req_result = 0;
140
141 BT_DBG("%s end: err %d", hdev->name, err);
142
143 return err;
144}
145
146static int hci_req_sync(struct hci_dev *hdev,
147 void (*req)(struct hci_request *req,
148 unsigned long opt),
149 unsigned long opt, __u32 timeout)
150{
151 int ret;
152
153 if (!test_bit(HCI_UP, &hdev->flags))
154 return -ENETDOWN;
155
156 /* Serialize all requests */
157 hci_req_lock(hdev);
158 ret = __hci_req_sync(hdev, req, opt, timeout);
159 hci_req_unlock(hdev);
160
161 return ret;
162}
163
164static void hci_reset_req(struct hci_request *req, unsigned long opt)
165{
166 BT_DBG("%s %ld", req->hdev->name, opt);
167
168 /* Reset device */
169 set_bit(HCI_RESET, &req->hdev->flags);
170 hci_req_add(req, HCI_OP_RESET, 0, NULL);
171}
172
173static void bredr_init(struct hci_request *req)
174{
175 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
176
177 /* Read Local Supported Features */
178 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
179
180 /* Read Local Version */
181 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
182
183 /* Read BD Address */
184 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
185}
186
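/* AMP controllers use block-based flow control, so stage one init reads
 * the local version, AMP info and data block size instead of the
 * BR/EDR specific commands.
 */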
187static void amp_init(struct hci_request *req)
188{
189 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
190
191 /* Read Local Version */
192 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
193
194 /* Read Local AMP Info */
195 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
196
197 /* Read Data Blk size */
198 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
199}
200
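/* Stage one of controller init: flush any vendor-specific commands the
 * driver queued on driver_init, optionally reset the controller, then
 * run the BR/EDR or AMP specific basic setup.
 */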
201static void hci_init1_req(struct hci_request *req, unsigned long opt)
202{
203 struct hci_dev *hdev = req->hdev;
204 struct hci_request init_req;
205 struct sk_buff *skb;
206
207 BT_DBG("%s %ld", hdev->name, opt);
208
209 /* Driver initialization */
210
211 hci_req_init(&init_req, hdev);
212
213 /* Special commands */
214 while ((skb = skb_dequeue(&hdev->driver_init))) {
215 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
216 skb->dev = (void *) hdev;
217
218 if (skb_queue_empty(&init_req.cmd_q))
219 bt_cb(skb)->req.start = true;
220
221 skb_queue_tail(&init_req.cmd_q, skb);
222 }
223 skb_queue_purge(&hdev->driver_init);
224
225 hci_req_run(&init_req, NULL);
226
227 /* Reset */
228 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
229 hci_reset_req(req, 0);
230
231 switch (hdev->dev_type) {
232 case HCI_BREDR:
233 bredr_init(req);
234 break;
235
236 case HCI_AMP:
237 amp_init(req);
238 break;
239
240 default:
241 BT_ERR("Unknown device type %d", hdev->dev_type);
242 break;
243 }
244}
245
246static void bredr_setup(struct hci_request *req)
247{
248 struct hci_cp_delete_stored_link_key cp;
249 __le16 param;
250 __u8 flt_type;
251
252 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
253 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
254
255 /* Read Class of Device */
256 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
257
258 /* Read Local Name */
259 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
260
261 /* Read Voice Setting */
262 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
263
264 /* Clear Event Filters */
265 flt_type = HCI_FLT_CLEAR_ALL;
266 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
267
268 /* Connection accept timeout ~20 secs */
269 param = __constant_cpu_to_le16(0x7d00);
270 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
271
272 bacpy(&cp.bdaddr, BDADDR_ANY);
273 cp.delete_all = 0x01;
274 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
275}
276
277static void le_setup(struct hci_request *req)
278{
279 /* Read LE Buffer Size */
280 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
281
282 /* Read LE Local Supported Features */
283 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
284
285 /* Read LE Advertising Channel TX Power */
286 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
287
288 /* Read LE White List Size */
289 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
290
291 /* Read LE Supported States */
292 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
293}
294
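/* Pick the Inquiry Mode to program: 0x02 for Extended Inquiry Result,
 * 0x01 for Inquiry Result with RSSI, 0x00 for standard results. The
 * manufacturer/revision checks whitelist controller firmware that is
 * known to handle RSSI results despite not advertising the feature.
 */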
295static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
296{
297 if (lmp_ext_inq_capable(hdev))
298 return 0x02;
299
300 if (lmp_inq_rssi_capable(hdev))
301 return 0x01;
302
303 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
304 hdev->lmp_subver == 0x0757)
305 return 0x01;
306
307 if (hdev->manufacturer == 15) {
308 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
309 return 0x01;
310 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
311 return 0x01;
312 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
313 return 0x01;
314 }
315
316 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
317 hdev->lmp_subver == 0x1805)
318 return 0x01;
319
320 return 0x00;
321}
322
323static void hci_setup_inquiry_mode(struct hci_request *req)
324{
325 u8 mode;
326
327 mode = hci_get_inquiry_mode(req->hdev);
328
329 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
330}
331
332static void hci_setup_event_mask(struct hci_request *req)
333{
334 struct hci_dev *hdev = req->hdev;
335
336 /* The second byte is 0xff instead of 0x9f (two reserved bits
337 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
338 * command otherwise.
339 */
340 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
341
342	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
343	 * any event mask for pre-1.2 devices.
344 */
345 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
346 return;
347
348 if (lmp_bredr_capable(hdev)) {
349 events[4] |= 0x01; /* Flow Specification Complete */
350 events[4] |= 0x02; /* Inquiry Result with RSSI */
351 events[4] |= 0x04; /* Read Remote Extended Features Complete */
352 events[5] |= 0x08; /* Synchronous Connection Complete */
353 events[5] |= 0x10; /* Synchronous Connection Changed */
354 }
355
356 if (lmp_inq_rssi_capable(hdev))
357 events[4] |= 0x02; /* Inquiry Result with RSSI */
358
359 if (lmp_sniffsubr_capable(hdev))
360 events[5] |= 0x20; /* Sniff Subrating */
361
362 if (lmp_pause_enc_capable(hdev))
363 events[5] |= 0x80; /* Encryption Key Refresh Complete */
364
365 if (lmp_ext_inq_capable(hdev))
366 events[5] |= 0x40; /* Extended Inquiry Result */
367
368 if (lmp_no_flush_capable(hdev))
369 events[7] |= 0x01; /* Enhanced Flush Complete */
370
371 if (lmp_lsto_capable(hdev))
372 events[6] |= 0x80; /* Link Supervision Timeout Changed */
373
374 if (lmp_ssp_capable(hdev)) {
375 events[6] |= 0x01; /* IO Capability Request */
376 events[6] |= 0x02; /* IO Capability Response */
377 events[6] |= 0x04; /* User Confirmation Request */
378 events[6] |= 0x08; /* User Passkey Request */
379 events[6] |= 0x10; /* Remote OOB Data Request */
380 events[6] |= 0x20; /* Simple Pairing Complete */
381 events[7] |= 0x04; /* User Passkey Notification */
382 events[7] |= 0x08; /* Keypress Notification */
383 events[7] |= 0x10; /* Remote Host Supported
384 * Features Notification
385 */
386 }
387
388 if (lmp_le_capable(hdev))
389 events[7] |= 0x20; /* LE Meta-Event */
390
391 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
392
393 if (lmp_le_capable(hdev)) {
394 memset(events, 0, sizeof(events));
395 events[0] = 0x1f;
396 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
397 sizeof(events), events);
398 }
399}
400
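/* Stage two of controller init (skipped for AMP controllers): BR/EDR
 * and LE basic setup, event mask, SSP or EIR configuration, inquiry
 * mode, extended features and link-level authentication.
 */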
401static void hci_init2_req(struct hci_request *req, unsigned long opt)
402{
403 struct hci_dev *hdev = req->hdev;
404
405 if (lmp_bredr_capable(hdev))
406 bredr_setup(req);
407
408 if (lmp_le_capable(hdev))
409 le_setup(req);
410
411 hci_setup_event_mask(req);
412
413 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
414 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
415
416 if (lmp_ssp_capable(hdev)) {
417 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
418 u8 mode = 0x01;
419 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
420 sizeof(mode), &mode);
421 } else {
422 struct hci_cp_write_eir cp;
423
424 memset(hdev->eir, 0, sizeof(hdev->eir));
425 memset(&cp, 0, sizeof(cp));
426
427 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
428 }
429 }
430
431 if (lmp_inq_rssi_capable(hdev))
432 hci_setup_inquiry_mode(req);
433
434 if (lmp_inq_tx_pwr_capable(hdev))
435 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
436
437 if (lmp_ext_feat_capable(hdev)) {
438 struct hci_cp_read_local_ext_features cp;
439
440 cp.page = 0x01;
441 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
442 sizeof(cp), &cp);
443 }
444
445 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
446 u8 enable = 1;
447 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
448 &enable);
449 }
450}
451
452static void hci_setup_link_policy(struct hci_request *req)
453{
454 struct hci_dev *hdev = req->hdev;
455 struct hci_cp_write_def_link_policy cp;
456 u16 link_policy = 0;
457
458 if (lmp_rswitch_capable(hdev))
459 link_policy |= HCI_LP_RSWITCH;
460 if (lmp_hold_capable(hdev))
461 link_policy |= HCI_LP_HOLD;
462 if (lmp_sniff_capable(hdev))
463 link_policy |= HCI_LP_SNIFF;
464 if (lmp_park_capable(hdev))
465 link_policy |= HCI_LP_PARK;
466
467 cp.policy = cpu_to_le16(link_policy);
468 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
469}
470
471static void hci_set_le_support(struct hci_request *req)
472{
473 struct hci_dev *hdev = req->hdev;
474 struct hci_cp_write_le_host_supported cp;
475
476 memset(&cp, 0, sizeof(cp));
477
478 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
479 cp.le = 0x01;
480 cp.simul = lmp_le_br_capable(hdev);
481 }
482
483 if (cp.le != lmp_host_le_capable(hdev))
484 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
485 &cp);
486}
487
488static void hci_init3_req(struct hci_request *req, unsigned long opt)
489{
490 struct hci_dev *hdev = req->hdev;
491
492 if (hdev->commands[5] & 0x10)
493 hci_setup_link_policy(req);
494
495 if (lmp_le_capable(hdev))
496 hci_set_le_support(req);
497}
498
499static int __hci_init(struct hci_dev *hdev)
500{
501 int err;
502
503 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
504 if (err < 0)
505 return err;
506
507	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
508	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
509	 * the first stage init.
510 */
511 if (hdev->dev_type != HCI_BREDR)
512 return 0;
513
514 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
515 if (err < 0)
516 return err;
517
518 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
519}
520
521static void hci_scan_req(struct hci_request *req, unsigned long opt)
522{
523 __u8 scan = opt;
524
525 BT_DBG("%s %x", req->hdev->name, scan);
526
527 /* Inquiry and Page scans */
528 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
529}
530
531static void hci_auth_req(struct hci_request *req, unsigned long opt)
532{
533 __u8 auth = opt;
534
535 BT_DBG("%s %x", req->hdev->name, auth);
536
537 /* Authentication */
538 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
539}
540
541static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
542{
543 __u8 encrypt = opt;
544
545 BT_DBG("%s %x", req->hdev->name, encrypt);
546
547 /* Encryption */
548 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
549}
550
551static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
552{
553 __le16 policy = cpu_to_le16(opt);
554
555 BT_DBG("%s %x", req->hdev->name, policy);
556
557 /* Default link policy */
558 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
559}
560
561/* Get HCI device by index.
562 * Device is held on return. */
563struct hci_dev *hci_dev_get(int index)
564{
565 struct hci_dev *hdev = NULL, *d;
566
567 BT_DBG("%d", index);
568
569 if (index < 0)
570 return NULL;
571
572 read_lock(&hci_dev_list_lock);
573 list_for_each_entry(d, &hci_dev_list, list) {
574 if (d->id == index) {
575 hdev = hci_dev_hold(d);
576 break;
577 }
578 }
579 read_unlock(&hci_dev_list_lock);
580 return hdev;
581}
582
583/* ---- Inquiry support ---- */
584
585bool hci_discovery_active(struct hci_dev *hdev)
586{
587 struct discovery_state *discov = &hdev->discovery;
588
589 switch (discov->state) {
590 case DISCOVERY_FINDING:
591 case DISCOVERY_RESOLVING:
592 return true;
593
594 default:
595 return false;
596 }
597}
598
599void hci_discovery_set_state(struct hci_dev *hdev, int state)
600{
601 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
602
603 if (hdev->discovery.state == state)
604 return;
605
606 switch (state) {
607 case DISCOVERY_STOPPED:
608 if (hdev->discovery.state != DISCOVERY_STARTING)
609 mgmt_discovering(hdev, 0);
610 break;
611 case DISCOVERY_STARTING:
612 break;
613 case DISCOVERY_FINDING:
614 mgmt_discovering(hdev, 1);
615 break;
616 case DISCOVERY_RESOLVING:
617 break;
618 case DISCOVERY_STOPPING:
619 break;
620 }
621
622 hdev->discovery.state = state;
623}
624
625static void inquiry_cache_flush(struct hci_dev *hdev)
626{
627 struct discovery_state *cache = &hdev->discovery;
628 struct inquiry_entry *p, *n;
629
630 list_for_each_entry_safe(p, n, &cache->all, all) {
631 list_del(&p->all);
632 kfree(p);
633 }
634
635 INIT_LIST_HEAD(&cache->unknown);
636 INIT_LIST_HEAD(&cache->resolve);
637}
638
639struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
640 bdaddr_t *bdaddr)
641{
642 struct discovery_state *cache = &hdev->discovery;
643 struct inquiry_entry *e;
644
645 BT_DBG("cache %p, %pMR", cache, bdaddr);
646
647 list_for_each_entry(e, &cache->all, all) {
648 if (!bacmp(&e->data.bdaddr, bdaddr))
649 return e;
650 }
651
652 return NULL;
653}
654
655struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
656 bdaddr_t *bdaddr)
657{
658 struct discovery_state *cache = &hdev->discovery;
659 struct inquiry_entry *e;
660
661 BT_DBG("cache %p, %pMR", cache, bdaddr);
662
663 list_for_each_entry(e, &cache->unknown, list) {
664 if (!bacmp(&e->data.bdaddr, bdaddr))
665 return e;
666 }
667
668 return NULL;
669}
670
671struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
672 bdaddr_t *bdaddr,
673 int state)
674{
675 struct discovery_state *cache = &hdev->discovery;
676 struct inquiry_entry *e;
677
678 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
679
680 list_for_each_entry(e, &cache->resolve, list) {
681 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
682 return e;
683 if (!bacmp(&e->data.bdaddr, bdaddr))
684 return e;
685 }
686
687 return NULL;
688}
689
690void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
691 struct inquiry_entry *ie)
692{
693 struct discovery_state *cache = &hdev->discovery;
694 struct list_head *pos = &cache->resolve;
695 struct inquiry_entry *p;
696
697 list_del(&ie->list);
698
699 list_for_each_entry(p, &cache->resolve, list) {
700 if (p->name_state != NAME_PENDING &&
701 abs(p->data.rssi) >= abs(ie->data.rssi))
702 break;
703 pos = &p->list;
704 }
705
706 list_add(&ie->list, pos);
707}
708
709bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
710 bool name_known, bool *ssp)
711{
712 struct discovery_state *cache = &hdev->discovery;
713 struct inquiry_entry *ie;
714
715 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
716
717 hci_remove_remote_oob_data(hdev, &data->bdaddr);
718
719 if (ssp)
720 *ssp = data->ssp_mode;
721
722 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
723 if (ie) {
724 if (ie->data.ssp_mode && ssp)
725 *ssp = true;
726
727 if (ie->name_state == NAME_NEEDED &&
728 data->rssi != ie->data.rssi) {
729 ie->data.rssi = data->rssi;
730 hci_inquiry_cache_update_resolve(hdev, ie);
731 }
732
733 goto update;
734 }
735
736 /* Entry not in the cache. Add new one. */
737 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
738 if (!ie)
739 return false;
740
741 list_add(&ie->all, &cache->all);
742
743 if (name_known) {
744 ie->name_state = NAME_KNOWN;
745 } else {
746 ie->name_state = NAME_NOT_KNOWN;
747 list_add(&ie->list, &cache->unknown);
748 }
749
750update:
751 if (name_known && ie->name_state != NAME_KNOWN &&
752 ie->name_state != NAME_PENDING) {
753 ie->name_state = NAME_KNOWN;
754 list_del(&ie->list);
755 }
756
757 memcpy(&ie->data, data, sizeof(*data));
758 ie->timestamp = jiffies;
759 cache->timestamp = jiffies;
760
761 if (ie->name_state == NAME_NOT_KNOWN)
762 return false;
763
764 return true;
765}
766
767static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
768{
769 struct discovery_state *cache = &hdev->discovery;
770 struct inquiry_info *info = (struct inquiry_info *) buf;
771 struct inquiry_entry *e;
772 int copied = 0;
773
774 list_for_each_entry(e, &cache->all, all) {
775 struct inquiry_data *data = &e->data;
776
777 if (copied >= num)
778 break;
779
780 bacpy(&info->bdaddr, &data->bdaddr);
781 info->pscan_rep_mode = data->pscan_rep_mode;
782 info->pscan_period_mode = data->pscan_period_mode;
783 info->pscan_mode = data->pscan_mode;
784 memcpy(info->dev_class, data->dev_class, 3);
785 info->clock_offset = data->clock_offset;
786
787 info++;
788 copied++;
789 }
790
791 BT_DBG("cache %p, copied %d", cache, copied);
792 return copied;
793}
794
795static void hci_inq_req(struct hci_request *req, unsigned long opt)
796{
797 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
798 struct hci_dev *hdev = req->hdev;
799 struct hci_cp_inquiry cp;
800
801 BT_DBG("%s", hdev->name);
802
803 if (test_bit(HCI_INQUIRY, &hdev->flags))
804 return;
805
806 /* Start Inquiry */
807 memcpy(&cp.lap, &ir->lap, 3);
808 cp.length = ir->length;
809 cp.num_rsp = ir->num_rsp;
810 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
811}
812
813int hci_inquiry(void __user *arg)
814{
815 __u8 __user *ptr = arg;
816 struct hci_inquiry_req ir;
817 struct hci_dev *hdev;
818 int err = 0, do_inquiry = 0, max_rsp;
819 long timeo;
820 __u8 *buf;
821
822 if (copy_from_user(&ir, ptr, sizeof(ir)))
823 return -EFAULT;
824
825 hdev = hci_dev_get(ir.dev_id);
826 if (!hdev)
827 return -ENODEV;
828
829 hci_dev_lock(hdev);
830 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
831 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
832 inquiry_cache_flush(hdev);
833 do_inquiry = 1;
834 }
835 hci_dev_unlock(hdev);
836
837 timeo = ir.length * msecs_to_jiffies(2000);
838
839 if (do_inquiry) {
840 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
841 timeo);
842 if (err < 0)
843 goto done;
844 }
845
846	/* For an unlimited number of responses we use a buffer with
847	 * 255 entries
848 */
849 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
850
851	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
852	 * and then copy it to user space.
853 */
854 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
855 if (!buf) {
856 err = -ENOMEM;
857 goto done;
858 }
859
860 hci_dev_lock(hdev);
861 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
862 hci_dev_unlock(hdev);
863
864 BT_DBG("num_rsp %d", ir.num_rsp);
865
866 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
867 ptr += sizeof(ir);
868 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
869 ir.num_rsp))
870 err = -EFAULT;
871 } else
872 err = -EFAULT;
873
874 kfree(buf);
875
876done:
877 hci_dev_put(hdev);
878 return err;
879}
880
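/* Build the LE advertising data at ptr: an AD Flags field, the
 * advertising TX power (if known) and the local name, shortened if it
 * does not fit within HCI_MAX_AD_LENGTH. Returns the number of bytes
 * written.
 */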
881static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
882{
883 u8 ad_len = 0, flags = 0;
884 size_t name_len;
885
886 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
887 flags |= LE_AD_GENERAL;
888
889 if (!lmp_bredr_capable(hdev))
890 flags |= LE_AD_NO_BREDR;
891
892 if (lmp_le_br_capable(hdev))
893 flags |= LE_AD_SIM_LE_BREDR_CTRL;
894
895 if (lmp_host_le_br_capable(hdev))
896 flags |= LE_AD_SIM_LE_BREDR_HOST;
897
898 if (flags) {
899 BT_DBG("adv flags 0x%02x", flags);
900
901 ptr[0] = 2;
902 ptr[1] = EIR_FLAGS;
903 ptr[2] = flags;
904
905 ad_len += 3;
906 ptr += 3;
907 }
908
909 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
910 ptr[0] = 2;
911 ptr[1] = EIR_TX_POWER;
912 ptr[2] = (u8) hdev->adv_tx_power;
913
914 ad_len += 3;
915 ptr += 3;
916 }
917
918 name_len = strlen(hdev->dev_name);
919 if (name_len > 0) {
920 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
921
922 if (name_len > max_len) {
923 name_len = max_len;
924 ptr[1] = EIR_NAME_SHORT;
925 } else
926 ptr[1] = EIR_NAME_COMPLETE;
927
928 ptr[0] = name_len + 1;
929
930 memcpy(ptr + 2, hdev->dev_name, name_len);
931
932 ad_len += (name_len + 2);
933 ptr += (name_len + 2);
934 }
935
936 return ad_len;
937}
938
939int hci_update_ad(struct hci_dev *hdev)
940{
941 struct hci_cp_le_set_adv_data cp;
942 u8 len;
943 int err;
944
945 hci_dev_lock(hdev);
946
947 if (!lmp_le_capable(hdev)) {
948 err = -EINVAL;
949 goto unlock;
950 }
951
952 memset(&cp, 0, sizeof(cp));
953
954 len = create_ad(hdev, cp.data);
955
956 if (hdev->adv_data_len == len &&
957 memcmp(cp.data, hdev->adv_data, len) == 0) {
958 err = 0;
959 goto unlock;
960 }
961
962 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
963 hdev->adv_data_len = len;
964
965 cp.length = len;
966 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
967
968unlock:
969 hci_dev_unlock(hdev);
970
971 return err;
972}
973
974/* ---- HCI ioctl helpers ---- */
975
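/* Power on an HCI device. Fails if the device is being unregistered,
 * is blocked by rfkill or is already up. After the driver's open()
 * succeeds, non-raw devices are initialised via __hci_init(); on
 * success HCI_UP is set and mgmt is notified, on failure all work is
 * flushed and the driver is closed again.
 */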
976int hci_dev_open(__u16 dev)
977{
978 struct hci_dev *hdev;
979 int ret = 0;
980
981 hdev = hci_dev_get(dev);
982 if (!hdev)
983 return -ENODEV;
984
985 BT_DBG("%s %p", hdev->name, hdev);
986
987 hci_req_lock(hdev);
988
989 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
990 ret = -ENODEV;
991 goto done;
992 }
993
994 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
995 ret = -ERFKILL;
996 goto done;
997 }
998
999 if (test_bit(HCI_UP, &hdev->flags)) {
1000 ret = -EALREADY;
1001 goto done;
1002 }
1003
1004 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1005 set_bit(HCI_RAW, &hdev->flags);
1006
1007	/* Treat all non-BR/EDR controllers as raw devices if
1008	 * enable_hs is not set */
1009 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1010 set_bit(HCI_RAW, &hdev->flags);
1011
1012 if (hdev->open(hdev)) {
1013 ret = -EIO;
1014 goto done;
1015 }
1016
1017 if (!test_bit(HCI_RAW, &hdev->flags)) {
1018 atomic_set(&hdev->cmd_cnt, 1);
1019 set_bit(HCI_INIT, &hdev->flags);
1020 ret = __hci_init(hdev);
1021 clear_bit(HCI_INIT, &hdev->flags);
1022 }
1023
1024 if (!ret) {
1025 hci_dev_hold(hdev);
1026 set_bit(HCI_UP, &hdev->flags);
1027 hci_notify(hdev, HCI_DEV_UP);
1028 hci_update_ad(hdev);
1029 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1030 mgmt_valid_hdev(hdev)) {
1031 hci_dev_lock(hdev);
1032 mgmt_powered(hdev, 1);
1033 hci_dev_unlock(hdev);
1034 }
1035 } else {
1036 /* Init failed, cleanup */
1037 flush_work(&hdev->tx_work);
1038 flush_work(&hdev->cmd_work);
1039 flush_work(&hdev->rx_work);
1040
1041 skb_queue_purge(&hdev->cmd_q);
1042 skb_queue_purge(&hdev->rx_q);
1043
1044 if (hdev->flush)
1045 hdev->flush(hdev);
1046
1047 if (hdev->sent_cmd) {
1048 kfree_skb(hdev->sent_cmd);
1049 hdev->sent_cmd = NULL;
1050 }
1051
1052 hdev->close(hdev);
1053 hdev->flags = 0;
1054 }
1055
1056done:
1057 hci_req_unlock(hdev);
1058 hci_dev_put(hdev);
1059 return ret;
1060}
1061
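/* Common power-down path (used by hci_dev_close(), rfkill blocking and
 * the power_off work): cancel pending work, flush queues and
 * connections, optionally reset the controller and call the driver's
 * close(). Finally hdev->flags is zeroed and every dev_flag outside
 * HCI_PERSISTENT_MASK is cleared so that no non-persistent state
 * carries over into the next power cycle.
 */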
1062static int hci_dev_do_close(struct hci_dev *hdev)
1063{
1064 BT_DBG("%s %p", hdev->name, hdev);
1065
1066 cancel_work_sync(&hdev->le_scan);
1067
1068 cancel_delayed_work(&hdev->power_off);
1069
1070 hci_req_cancel(hdev, ENODEV);
1071 hci_req_lock(hdev);
1072
1073 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1074 del_timer_sync(&hdev->cmd_timer);
1075 hci_req_unlock(hdev);
1076 return 0;
1077 }
1078
1079 /* Flush RX and TX works */
1080 flush_work(&hdev->tx_work);
1081 flush_work(&hdev->rx_work);
1082
1083 if (hdev->discov_timeout > 0) {
1084 cancel_delayed_work(&hdev->discov_off);
1085 hdev->discov_timeout = 0;
1086 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1087 }
1088
1089 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1090 cancel_delayed_work(&hdev->service_cache);
1091
1092 cancel_delayed_work_sync(&hdev->le_scan_disable);
1093
1094 hci_dev_lock(hdev);
1095 inquiry_cache_flush(hdev);
1096 hci_conn_hash_flush(hdev);
1097 hci_dev_unlock(hdev);
1098
1099 hci_notify(hdev, HCI_DEV_DOWN);
1100
1101 if (hdev->flush)
1102 hdev->flush(hdev);
1103
1104 /* Reset device */
1105 skb_queue_purge(&hdev->cmd_q);
1106 atomic_set(&hdev->cmd_cnt, 1);
1107 if (!test_bit(HCI_RAW, &hdev->flags) &&
1108 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1109 set_bit(HCI_INIT, &hdev->flags);
1110 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1111 clear_bit(HCI_INIT, &hdev->flags);
1112 }
1113
1114 /* flush cmd work */
1115 flush_work(&hdev->cmd_work);
1116
1117 /* Drop queues */
1118 skb_queue_purge(&hdev->rx_q);
1119 skb_queue_purge(&hdev->cmd_q);
1120 skb_queue_purge(&hdev->raw_q);
1121
1122 /* Drop last sent command */
1123 if (hdev->sent_cmd) {
1124 del_timer_sync(&hdev->cmd_timer);
1125 kfree_skb(hdev->sent_cmd);
1126 hdev->sent_cmd = NULL;
1127 }
1128
1129 /* After this point our queues are empty
1130 * and no tasks are scheduled. */
1131 hdev->close(hdev);
1132
1133 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1134 mgmt_valid_hdev(hdev)) {
1135 hci_dev_lock(hdev);
1136 mgmt_powered(hdev, 0);
1137 hci_dev_unlock(hdev);
1138 }
1139
1140	/* Clear flags; only device flags in HCI_PERSISTENT_MASK survive the power cycle */
1141 hdev->flags = 0;
1142 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1143
1144 /* Controller radio is available but is currently powered down */
1145 hdev->amp_status = 0;
1146
1147 memset(hdev->eir, 0, sizeof(hdev->eir));
1148 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1149
1150 hci_req_unlock(hdev);
1151
1152 hci_dev_put(hdev);
1153 return 0;
1154}
1155
1156int hci_dev_close(__u16 dev)
1157{
1158 struct hci_dev *hdev;
1159 int err;
1160
1161 hdev = hci_dev_get(dev);
1162 if (!hdev)
1163 return -ENODEV;
1164
1165 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1166 cancel_delayed_work(&hdev->power_off);
1167
1168 err = hci_dev_do_close(hdev);
1169
1170 hci_dev_put(hdev);
1171 return err;
1172}
1173
1174int hci_dev_reset(__u16 dev)
1175{
1176 struct hci_dev *hdev;
1177 int ret = 0;
1178
1179 hdev = hci_dev_get(dev);
1180 if (!hdev)
1181 return -ENODEV;
1182
1183 hci_req_lock(hdev);
1184
1185 if (!test_bit(HCI_UP, &hdev->flags))
1186 goto done;
1187
1188 /* Drop queues */
1189 skb_queue_purge(&hdev->rx_q);
1190 skb_queue_purge(&hdev->cmd_q);
1191
1192 hci_dev_lock(hdev);
1193 inquiry_cache_flush(hdev);
1194 hci_conn_hash_flush(hdev);
1195 hci_dev_unlock(hdev);
1196
1197 if (hdev->flush)
1198 hdev->flush(hdev);
1199
1200 atomic_set(&hdev->cmd_cnt, 1);
1201 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1202
1203 if (!test_bit(HCI_RAW, &hdev->flags))
1204 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1205
1206done:
1207 hci_req_unlock(hdev);
1208 hci_dev_put(hdev);
1209 return ret;
1210}
1211
1212int hci_dev_reset_stat(__u16 dev)
1213{
1214 struct hci_dev *hdev;
1215 int ret = 0;
1216
1217 hdev = hci_dev_get(dev);
1218 if (!hdev)
1219 return -ENODEV;
1220
1221 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1222
1223 hci_dev_put(hdev);
1224
1225 return ret;
1226}
1227
1228int hci_dev_cmd(unsigned int cmd, void __user *arg)
1229{
1230 struct hci_dev *hdev;
1231 struct hci_dev_req dr;
1232 int err = 0;
1233
1234 if (copy_from_user(&dr, arg, sizeof(dr)))
1235 return -EFAULT;
1236
1237 hdev = hci_dev_get(dr.dev_id);
1238 if (!hdev)
1239 return -ENODEV;
1240
1241 switch (cmd) {
1242 case HCISETAUTH:
1243 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1244 HCI_INIT_TIMEOUT);
1245 break;
1246
1247 case HCISETENCRYPT:
1248 if (!lmp_encrypt_capable(hdev)) {
1249 err = -EOPNOTSUPP;
1250 break;
1251 }
1252
1253 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1254 /* Auth must be enabled first */
1255 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1256 HCI_INIT_TIMEOUT);
1257 if (err)
1258 break;
1259 }
1260
1261 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1262 HCI_INIT_TIMEOUT);
1263 break;
1264
1265 case HCISETSCAN:
1266 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1267 HCI_INIT_TIMEOUT);
1268 break;
1269
1270 case HCISETLINKPOL:
1271 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1272 HCI_INIT_TIMEOUT);
1273 break;
1274
1275 case HCISETLINKMODE:
1276 hdev->link_mode = ((__u16) dr.dev_opt) &
1277 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1278 break;
1279
1280 case HCISETPTYPE:
1281 hdev->pkt_type = (__u16) dr.dev_opt;
1282 break;
1283
1284 case HCISETACLMTU:
1285 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1286 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1287 break;
1288
1289 case HCISETSCOMTU:
1290 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1291 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1292 break;
1293
1294 default:
1295 err = -EINVAL;
1296 break;
1297 }
1298
1299 hci_dev_put(hdev);
1300 return err;
1301}
1302
1303int hci_get_dev_list(void __user *arg)
1304{
1305 struct hci_dev *hdev;
1306 struct hci_dev_list_req *dl;
1307 struct hci_dev_req *dr;
1308 int n = 0, size, err;
1309 __u16 dev_num;
1310
1311 if (get_user(dev_num, (__u16 __user *) arg))
1312 return -EFAULT;
1313
1314 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1315 return -EINVAL;
1316
1317 size = sizeof(*dl) + dev_num * sizeof(*dr);
1318
1319 dl = kzalloc(size, GFP_KERNEL);
1320 if (!dl)
1321 return -ENOMEM;
1322
1323 dr = dl->dev_req;
1324
1325 read_lock(&hci_dev_list_lock);
1326 list_for_each_entry(hdev, &hci_dev_list, list) {
1327 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1328 cancel_delayed_work(&hdev->power_off);
1329
1330 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1331 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1332
1333 (dr + n)->dev_id = hdev->id;
1334 (dr + n)->dev_opt = hdev->flags;
1335
1336 if (++n >= dev_num)
1337 break;
1338 }
1339 read_unlock(&hci_dev_list_lock);
1340
1341 dl->dev_num = n;
1342 size = sizeof(*dl) + n * sizeof(*dr);
1343
1344 err = copy_to_user(arg, dl, size);
1345 kfree(dl);
1346
1347 return err ? -EFAULT : 0;
1348}
1349
1350int hci_get_dev_info(void __user *arg)
1351{
1352 struct hci_dev *hdev;
1353 struct hci_dev_info di;
1354 int err = 0;
1355
1356 if (copy_from_user(&di, arg, sizeof(di)))
1357 return -EFAULT;
1358
1359 hdev = hci_dev_get(di.dev_id);
1360 if (!hdev)
1361 return -ENODEV;
1362
1363 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1364 cancel_delayed_work_sync(&hdev->power_off);
1365
1366 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1367 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1368
1369 strcpy(di.name, hdev->name);
1370 di.bdaddr = hdev->bdaddr;
1371 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1372 di.flags = hdev->flags;
1373 di.pkt_type = hdev->pkt_type;
1374 if (lmp_bredr_capable(hdev)) {
1375 di.acl_mtu = hdev->acl_mtu;
1376 di.acl_pkts = hdev->acl_pkts;
1377 di.sco_mtu = hdev->sco_mtu;
1378 di.sco_pkts = hdev->sco_pkts;
1379 } else {
1380 di.acl_mtu = hdev->le_mtu;
1381 di.acl_pkts = hdev->le_pkts;
1382 di.sco_mtu = 0;
1383 di.sco_pkts = 0;
1384 }
1385 di.link_policy = hdev->link_policy;
1386 di.link_mode = hdev->link_mode;
1387
1388 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1389 memcpy(&di.features, &hdev->features, sizeof(di.features));
1390
1391 if (copy_to_user(arg, &di, sizeof(di)))
1392 err = -EFAULT;
1393
1394 hci_dev_put(hdev);
1395
1396 return err;
1397}
1398
1399/* ---- Interface to HCI drivers ---- */
1400
1401static int hci_rfkill_set_block(void *data, bool blocked)
1402{
1403 struct hci_dev *hdev = data;
1404
1405 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1406
1407 if (!blocked)
1408 return 0;
1409
1410 hci_dev_do_close(hdev);
1411
1412 return 0;
1413}
1414
1415static const struct rfkill_ops hci_rfkill_ops = {
1416 .set_block = hci_rfkill_set_block,
1417};
1418
1419static void hci_power_on(struct work_struct *work)
1420{
1421 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1422
1423 BT_DBG("%s", hdev->name);
1424
1425 if (hci_dev_open(hdev->id) < 0)
1426 return;
1427
1428 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1429 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1430 HCI_AUTO_OFF_TIMEOUT);
1431
1432 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1433 mgmt_index_added(hdev);
1434}
1435
1436static void hci_power_off(struct work_struct *work)
1437{
1438 struct hci_dev *hdev = container_of(work, struct hci_dev,
1439 power_off.work);
1440
1441 BT_DBG("%s", hdev->name);
1442
1443 hci_dev_do_close(hdev);
1444}
1445
1446static void hci_discov_off(struct work_struct *work)
1447{
1448 struct hci_dev *hdev;
1449 u8 scan = SCAN_PAGE;
1450
1451 hdev = container_of(work, struct hci_dev, discov_off.work);
1452
1453 BT_DBG("%s", hdev->name);
1454
1455 hci_dev_lock(hdev);
1456
1457 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1458
1459 hdev->discov_timeout = 0;
1460
1461 hci_dev_unlock(hdev);
1462}
1463
1464int hci_uuids_clear(struct hci_dev *hdev)
1465{
1466 struct bt_uuid *uuid, *tmp;
1467
1468 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1469 list_del(&uuid->list);
1470 kfree(uuid);
1471 }
1472
1473 return 0;
1474}
1475
1476int hci_link_keys_clear(struct hci_dev *hdev)
1477{
1478 struct list_head *p, *n;
1479
1480 list_for_each_safe(p, n, &hdev->link_keys) {
1481 struct link_key *key;
1482
1483 key = list_entry(p, struct link_key, list);
1484
1485 list_del(p);
1486 kfree(key);
1487 }
1488
1489 return 0;
1490}
1491
1492int hci_smp_ltks_clear(struct hci_dev *hdev)
1493{
1494 struct smp_ltk *k, *tmp;
1495
1496 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1497 list_del(&k->list);
1498 kfree(k);
1499 }
1500
1501 return 0;
1502}
1503
1504struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1505{
1506 struct link_key *k;
1507
1508 list_for_each_entry(k, &hdev->link_keys, list)
1509 if (bacmp(bdaddr, &k->bdaddr) == 0)
1510 return k;
1511
1512 return NULL;
1513}
1514
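/* Decide whether a link key created during pairing should be stored
 * persistently. Debug keys and changed-combination keys with no
 * previous key are never stored; legacy keys, security mode 3 keys and
 * keys where either side required dedicated bonding (or neither side
 * asked for no-bonding) are kept.
 */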
1515static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1516 u8 key_type, u8 old_key_type)
1517{
1518 /* Legacy key */
1519 if (key_type < 0x03)
1520 return true;
1521
1522 /* Debug keys are insecure so don't store them persistently */
1523 if (key_type == HCI_LK_DEBUG_COMBINATION)
1524 return false;
1525
1526 /* Changed combination key and there's no previous one */
1527 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1528 return false;
1529
1530 /* Security mode 3 case */
1531 if (!conn)
1532 return true;
1533
1534 /* Neither local nor remote side had no-bonding as requirement */
1535 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1536 return true;
1537
1538 /* Local side had dedicated bonding as requirement */
1539 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1540 return true;
1541
1542 /* Remote side had dedicated bonding as requirement */
1543 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1544 return true;
1545
1546 /* If none of the above criteria match, then don't store the key
1547 * persistently */
1548 return false;
1549}
1550
1551struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1552{
1553 struct smp_ltk *k;
1554
1555 list_for_each_entry(k, &hdev->long_term_keys, list) {
1556 if (k->ediv != ediv ||
1557 memcmp(rand, k->rand, sizeof(k->rand)))
1558 continue;
1559
1560 return k;
1561 }
1562
1563 return NULL;
1564}
1565
1566struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1567 u8 addr_type)
1568{
1569 struct smp_ltk *k;
1570
1571 list_for_each_entry(k, &hdev->long_term_keys, list)
1572 if (addr_type == k->bdaddr_type &&
1573 bacmp(bdaddr, &k->bdaddr) == 0)
1574 return k;
1575
1576 return NULL;
1577}
1578
1579int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1580 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1581{
1582 struct link_key *key, *old_key;
1583 u8 old_key_type;
1584 bool persistent;
1585
1586 old_key = hci_find_link_key(hdev, bdaddr);
1587 if (old_key) {
1588 old_key_type = old_key->type;
1589 key = old_key;
1590 } else {
1591 old_key_type = conn ? conn->key_type : 0xff;
1592 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1593 if (!key)
1594 return -ENOMEM;
1595 list_add(&key->list, &hdev->link_keys);
1596 }
1597
1598 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1599
1600 /* Some buggy controller combinations generate a changed
1601 * combination key for legacy pairing even when there's no
1602 * previous key */
1603 if (type == HCI_LK_CHANGED_COMBINATION &&
1604 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1605 type = HCI_LK_COMBINATION;
1606 if (conn)
1607 conn->key_type = type;
1608 }
1609
1610 bacpy(&key->bdaddr, bdaddr);
1611 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1612 key->pin_len = pin_len;
1613
1614 if (type == HCI_LK_CHANGED_COMBINATION)
1615 key->type = old_key_type;
1616 else
1617 key->type = type;
1618
1619 if (!new_key)
1620 return 0;
1621
1622 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1623
1624 mgmt_new_link_key(hdev, key, persistent);
1625
1626 if (conn)
1627 conn->flush_key = !persistent;
1628
1629 return 0;
1630}
1631
1632int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1633 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1634 ediv, u8 rand[8])
1635{
1636 struct smp_ltk *key, *old_key;
1637
1638 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1639 return 0;
1640
1641 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1642 if (old_key)
1643 key = old_key;
1644 else {
1645 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1646 if (!key)
1647 return -ENOMEM;
1648 list_add(&key->list, &hdev->long_term_keys);
1649 }
1650
1651 bacpy(&key->bdaddr, bdaddr);
1652 key->bdaddr_type = addr_type;
1653 memcpy(key->val, tk, sizeof(key->val));
1654 key->authenticated = authenticated;
1655 key->ediv = ediv;
1656 key->enc_size = enc_size;
1657 key->type = type;
1658 memcpy(key->rand, rand, sizeof(key->rand));
1659
1660 if (!new_key)
1661 return 0;
1662
1663 if (type & HCI_SMP_LTK)
1664 mgmt_new_ltk(hdev, key, 1);
1665
1666 return 0;
1667}
1668
1669int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1670{
1671 struct link_key *key;
1672
1673 key = hci_find_link_key(hdev, bdaddr);
1674 if (!key)
1675 return -ENOENT;
1676
1677 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1678
1679 list_del(&key->list);
1680 kfree(key);
1681
1682 return 0;
1683}
1684
1685int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1686{
1687 struct smp_ltk *k, *tmp;
1688
1689 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1690 if (bacmp(bdaddr, &k->bdaddr))
1691 continue;
1692
1693 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1694
1695 list_del(&k->list);
1696 kfree(k);
1697 }
1698
1699 return 0;
1700}
1701
1702/* HCI command timer function */
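/* Fires when the controller has not completed the last sent command in
 * time: log the stalled opcode (if any) and restore cmd_cnt so that
 * hci_cmd_work() can continue draining the command queue.
 */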
1703static void hci_cmd_timeout(unsigned long arg)
1704{
1705 struct hci_dev *hdev = (void *) arg;
1706
1707 if (hdev->sent_cmd) {
1708 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1709 u16 opcode = __le16_to_cpu(sent->opcode);
1710
1711 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1712 } else {
1713 BT_ERR("%s command tx timeout", hdev->name);
1714 }
1715
1716 atomic_set(&hdev->cmd_cnt, 1);
1717 queue_work(hdev->workqueue, &hdev->cmd_work);
1718}
1719
1720struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1721 bdaddr_t *bdaddr)
1722{
1723 struct oob_data *data;
1724
1725 list_for_each_entry(data, &hdev->remote_oob_data, list)
1726 if (bacmp(bdaddr, &data->bdaddr) == 0)
1727 return data;
1728
1729 return NULL;
1730}
1731
1732int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1733{
1734 struct oob_data *data;
1735
1736 data = hci_find_remote_oob_data(hdev, bdaddr);
1737 if (!data)
1738 return -ENOENT;
1739
1740 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1741
1742 list_del(&data->list);
1743 kfree(data);
1744
1745 return 0;
1746}
1747
1748int hci_remote_oob_data_clear(struct hci_dev *hdev)
1749{
1750 struct oob_data *data, *n;
1751
1752 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1753 list_del(&data->list);
1754 kfree(data);
1755 }
1756
1757 return 0;
1758}
1759
1760int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1761 u8 *randomizer)
1762{
1763 struct oob_data *data;
1764
1765 data = hci_find_remote_oob_data(hdev, bdaddr);
1766
1767 if (!data) {
1768 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1769 if (!data)
1770 return -ENOMEM;
1771
1772 bacpy(&data->bdaddr, bdaddr);
1773 list_add(&data->list, &hdev->remote_oob_data);
1774 }
1775
1776 memcpy(data->hash, hash, sizeof(data->hash));
1777 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1778
1779 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1780
1781 return 0;
1782}
1783
1784struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1785{
1786 struct bdaddr_list *b;
1787
1788 list_for_each_entry(b, &hdev->blacklist, list)
1789 if (bacmp(bdaddr, &b->bdaddr) == 0)
1790 return b;
1791
1792 return NULL;
1793}
1794
1795int hci_blacklist_clear(struct hci_dev *hdev)
1796{
1797 struct list_head *p, *n;
1798
1799 list_for_each_safe(p, n, &hdev->blacklist) {
1800 struct bdaddr_list *b;
1801
1802 b = list_entry(p, struct bdaddr_list, list);
1803
1804 list_del(p);
1805 kfree(b);
1806 }
1807
1808 return 0;
1809}
1810
1811int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1812{
1813 struct bdaddr_list *entry;
1814
1815 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1816 return -EBADF;
1817
1818 if (hci_blacklist_lookup(hdev, bdaddr))
1819 return -EEXIST;
1820
1821 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1822 if (!entry)
1823 return -ENOMEM;
1824
1825 bacpy(&entry->bdaddr, bdaddr);
1826
1827 list_add(&entry->list, &hdev->blacklist);
1828
1829 return mgmt_device_blocked(hdev, bdaddr, type);
1830}
1831
1832int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1833{
1834 struct bdaddr_list *entry;
1835
1836 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1837 return hci_blacklist_clear(hdev);
1838
1839 entry = hci_blacklist_lookup(hdev, bdaddr);
1840 if (!entry)
1841 return -ENOENT;
1842
1843 list_del(&entry->list);
1844 kfree(entry);
1845
1846 return mgmt_device_unblocked(hdev, bdaddr, type);
1847}
1848
1849static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1850{
1851 struct le_scan_params *param = (struct le_scan_params *) opt;
1852 struct hci_cp_le_set_scan_param cp;
1853
1854 memset(&cp, 0, sizeof(cp));
1855 cp.type = param->type;
1856 cp.interval = cpu_to_le16(param->interval);
1857 cp.window = cpu_to_le16(param->window);
1858
1859 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1860}
1861
1862static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1863{
1864 struct hci_cp_le_set_scan_enable cp;
1865
1866 memset(&cp, 0, sizeof(cp));
1867 cp.enable = 1;
1868 cp.filter_dup = 1;
1869
1870 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1871}
1872
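/* Start an LE scan synchronously: program the scan parameters, enable
 * scanning with duplicate filtering, then schedule le_scan_disable to
 * stop the scan after @timeout milliseconds.
 */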
1873static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1874 u16 window, int timeout)
1875{
1876 long timeo = msecs_to_jiffies(3000);
1877 struct le_scan_params param;
1878 int err;
1879
1880 BT_DBG("%s", hdev->name);
1881
1882 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1883 return -EINPROGRESS;
1884
1885 param.type = type;
1886 param.interval = interval;
1887 param.window = window;
1888
1889 hci_req_lock(hdev);
1890
1891 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1892 timeo);
1893 if (!err)
1894 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1895
1896 hci_req_unlock(hdev);
1897
1898 if (err < 0)
1899 return err;
1900
1901 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1902 msecs_to_jiffies(timeout));
1903
1904 return 0;
1905}
1906
1907int hci_cancel_le_scan(struct hci_dev *hdev)
1908{
1909 BT_DBG("%s", hdev->name);
1910
1911 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1912 return -EALREADY;
1913
1914 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1915 struct hci_cp_le_set_scan_enable cp;
1916
1917 /* Send HCI command to disable LE Scan */
1918 memset(&cp, 0, sizeof(cp));
1919 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1920 }
1921
1922 return 0;
1923}
1924
1925static void le_scan_disable_work(struct work_struct *work)
1926{
1927 struct hci_dev *hdev = container_of(work, struct hci_dev,
1928 le_scan_disable.work);
1929 struct hci_cp_le_set_scan_enable cp;
1930
1931 BT_DBG("%s", hdev->name);
1932
1933 memset(&cp, 0, sizeof(cp));
1934
1935 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1936}
1937
1938static void le_scan_work(struct work_struct *work)
1939{
1940 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1941 struct le_scan_params *param = &hdev->le_scan_params;
1942
1943 BT_DBG("%s", hdev->name);
1944
1945 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1946 param->timeout);
1947}
1948
1949int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1950 int timeout)
1951{
1952 struct le_scan_params *param = &hdev->le_scan_params;
1953
1954 BT_DBG("%s", hdev->name);
1955
1956 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1957 return -ENOTSUPP;
1958
1959 if (work_busy(&hdev->le_scan))
1960 return -EINPROGRESS;
1961
1962 param->type = type;
1963 param->interval = interval;
1964 param->window = window;
1965 param->timeout = timeout;
1966
1967 queue_work(system_long_wq, &hdev->le_scan);
1968
1969 return 0;
1970}
1971
1972/* Alloc HCI device */
1973struct hci_dev *hci_alloc_dev(void)
1974{
1975 struct hci_dev *hdev;
1976
1977 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1978 if (!hdev)
1979 return NULL;
1980
1981 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1982 hdev->esco_type = (ESCO_HV1);
1983 hdev->link_mode = (HCI_LM_ACCEPT);
1984 hdev->io_capability = 0x03; /* No Input No Output */
1985 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1986 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1987
1988 hdev->sniff_max_interval = 800;
1989 hdev->sniff_min_interval = 80;
1990
1991 mutex_init(&hdev->lock);
1992 mutex_init(&hdev->req_lock);
1993
1994 INIT_LIST_HEAD(&hdev->mgmt_pending);
1995 INIT_LIST_HEAD(&hdev->blacklist);
1996 INIT_LIST_HEAD(&hdev->uuids);
1997 INIT_LIST_HEAD(&hdev->link_keys);
1998 INIT_LIST_HEAD(&hdev->long_term_keys);
1999 INIT_LIST_HEAD(&hdev->remote_oob_data);
2000 INIT_LIST_HEAD(&hdev->conn_hash.list);
2001
2002 INIT_WORK(&hdev->rx_work, hci_rx_work);
2003 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2004 INIT_WORK(&hdev->tx_work, hci_tx_work);
2005 INIT_WORK(&hdev->power_on, hci_power_on);
2006 INIT_WORK(&hdev->le_scan, le_scan_work);
2007
2008 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2009 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2010 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2011
2012 skb_queue_head_init(&hdev->driver_init);
2013 skb_queue_head_init(&hdev->rx_q);
2014 skb_queue_head_init(&hdev->cmd_q);
2015 skb_queue_head_init(&hdev->raw_q);
2016
2017 init_waitqueue_head(&hdev->req_wait_q);
2018
2019 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2020
2021 hci_init_sysfs(hdev);
2022 discovery_init(hdev);
2023
2024 return hdev;
2025}
2026EXPORT_SYMBOL(hci_alloc_dev);
2027
2028/* Free HCI device */
2029void hci_free_dev(struct hci_dev *hdev)
2030{
2031 skb_queue_purge(&hdev->driver_init);
2032
2033 /* will free via device release */
2034 put_device(&hdev->dev);
2035}
2036EXPORT_SYMBOL(hci_free_dev);
2037
2038/* Register HCI device */
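/* Typical driver usage is a sketch along these lines (the my_* names
 * are illustrative, not part of this file):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_UART;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * Registration allocates the index, creates the workqueues and sysfs
 * entries, sets up rfkill and queues the power_on work.
 */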
2039int hci_register_dev(struct hci_dev *hdev)
2040{
2041 int id, error;
2042
2043 if (!hdev->open || !hdev->close)
2044 return -EINVAL;
2045
2046 /* Do not allow HCI_AMP devices to register at index 0,
2047 * so the index can be used as the AMP controller ID.
2048 */
2049 switch (hdev->dev_type) {
2050 case HCI_BREDR:
2051 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2052 break;
2053 case HCI_AMP:
2054 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2055 break;
2056 default:
2057 return -EINVAL;
2058 }
2059
2060 if (id < 0)
2061 return id;
2062
2063 sprintf(hdev->name, "hci%d", id);
2064 hdev->id = id;
2065
2066 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2067
2068 write_lock(&hci_dev_list_lock);
2069 list_add(&hdev->list, &hci_dev_list);
2070 write_unlock(&hci_dev_list_lock);
2071
2072 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2073 WQ_MEM_RECLAIM, 1);
2074 if (!hdev->workqueue) {
2075 error = -ENOMEM;
2076 goto err;
2077 }
2078
2079 hdev->req_workqueue = alloc_workqueue(hdev->name,
2080 WQ_HIGHPRI | WQ_UNBOUND |
2081 WQ_MEM_RECLAIM, 1);
2082 if (!hdev->req_workqueue) {
2083 destroy_workqueue(hdev->workqueue);
2084 error = -ENOMEM;
2085 goto err;
2086 }
2087
2088 error = hci_add_sysfs(hdev);
2089 if (error < 0)
2090 goto err_wqueue;
2091
2092 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2093 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2094 hdev);
2095 if (hdev->rfkill) {
2096 if (rfkill_register(hdev->rfkill) < 0) {
2097 rfkill_destroy(hdev->rfkill);
2098 hdev->rfkill = NULL;
2099 }
2100 }
2101
2102 set_bit(HCI_SETUP, &hdev->dev_flags);
2103
2104 if (hdev->dev_type != HCI_AMP)
2105 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2106
2107 hci_notify(hdev, HCI_DEV_REG);
2108 hci_dev_hold(hdev);
2109
2110 queue_work(hdev->req_workqueue, &hdev->power_on);
2111
2112 return id;
2113
2114err_wqueue:
2115 destroy_workqueue(hdev->workqueue);
2116 destroy_workqueue(hdev->req_workqueue);
2117err:
2118 ida_simple_remove(&hci_index_ida, hdev->id);
2119 write_lock(&hci_dev_list_lock);
2120 list_del(&hdev->list);
2121 write_unlock(&hci_dev_list_lock);
2122
2123 return error;
2124}
2125EXPORT_SYMBOL(hci_register_dev);
2126
2127/* Unregister HCI device */
2128void hci_unregister_dev(struct hci_dev *hdev)
2129{
2130 int i, id;
2131
2132 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2133
2134 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2135
2136 id = hdev->id;
2137
2138 write_lock(&hci_dev_list_lock);
2139 list_del(&hdev->list);
2140 write_unlock(&hci_dev_list_lock);
2141
2142 hci_dev_do_close(hdev);
2143
2144 for (i = 0; i < NUM_REASSEMBLY; i++)
2145 kfree_skb(hdev->reassembly[i]);
2146
2147 cancel_work_sync(&hdev->power_on);
2148
2149 if (!test_bit(HCI_INIT, &hdev->flags) &&
2150 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2151 hci_dev_lock(hdev);
2152 mgmt_index_removed(hdev);
2153 hci_dev_unlock(hdev);
2154 }
2155
2156 /* mgmt_index_removed should take care of emptying the
2157 * pending list */
2158 BUG_ON(!list_empty(&hdev->mgmt_pending));
2159
2160 hci_notify(hdev, HCI_DEV_UNREG);
2161
2162 if (hdev->rfkill) {
2163 rfkill_unregister(hdev->rfkill);
2164 rfkill_destroy(hdev->rfkill);
2165 }
2166
2167 hci_del_sysfs(hdev);
2168
2169 destroy_workqueue(hdev->workqueue);
2170 destroy_workqueue(hdev->req_workqueue);
2171
2172 hci_dev_lock(hdev);
2173 hci_blacklist_clear(hdev);
2174 hci_uuids_clear(hdev);
2175 hci_link_keys_clear(hdev);
2176 hci_smp_ltks_clear(hdev);
2177 hci_remote_oob_data_clear(hdev);
2178 hci_dev_unlock(hdev);
2179
2180 hci_dev_put(hdev);
2181
2182 ida_simple_remove(&hci_index_ida, id);
2183}
2184EXPORT_SYMBOL(hci_unregister_dev);
2185
2186/* Suspend HCI device */
2187int hci_suspend_dev(struct hci_dev *hdev)
2188{
2189 hci_notify(hdev, HCI_DEV_SUSPEND);
2190 return 0;
2191}
2192EXPORT_SYMBOL(hci_suspend_dev);
2193
2194/* Resume HCI device */
2195int hci_resume_dev(struct hci_dev *hdev)
2196{
2197 hci_notify(hdev, HCI_DEV_RESUME);
2198 return 0;
2199}
2200EXPORT_SYMBOL(hci_resume_dev);
2201
2202/* Receive frame from HCI drivers */
2203int hci_recv_frame(struct sk_buff *skb)
2204{
2205 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2206 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2207 && !test_bit(HCI_INIT, &hdev->flags))) {
2208 kfree_skb(skb);
2209 return -ENXIO;
2210 }
2211
2212 /* Incoming skb */
2213 bt_cb(skb)->incoming = 1;
2214
2215 /* Time stamp */
2216 __net_timestamp(skb);
2217
2218 skb_queue_tail(&hdev->rx_q, skb);
2219 queue_work(hdev->workqueue, &hdev->rx_work);
2220
2221 return 0;
2222}
2223EXPORT_SYMBOL(hci_recv_frame);
2224
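/* Reassemble one HCI packet from driver-provided fragments. A per-slot
 * skb is sized according to the packet type, filled until the length
 * announced in the packet header has been received, and then passed to
 * hci_recv_frame(). Returns the number of input bytes left over, or a
 * negative error.
 */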
2225static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2226 int count, __u8 index)
2227{
2228 int len = 0;
2229 int hlen = 0;
2230 int remain = count;
2231 struct sk_buff *skb;
2232 struct bt_skb_cb *scb;
2233
2234 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2235 index >= NUM_REASSEMBLY)
2236 return -EILSEQ;
2237
2238 skb = hdev->reassembly[index];
2239
2240 if (!skb) {
2241 switch (type) {
2242 case HCI_ACLDATA_PKT:
2243 len = HCI_MAX_FRAME_SIZE;
2244 hlen = HCI_ACL_HDR_SIZE;
2245 break;
2246 case HCI_EVENT_PKT:
2247 len = HCI_MAX_EVENT_SIZE;
2248 hlen = HCI_EVENT_HDR_SIZE;
2249 break;
2250 case HCI_SCODATA_PKT:
2251 len = HCI_MAX_SCO_SIZE;
2252 hlen = HCI_SCO_HDR_SIZE;
2253 break;
2254 }
2255
2256 skb = bt_skb_alloc(len, GFP_ATOMIC);
2257 if (!skb)
2258 return -ENOMEM;
2259
2260 scb = (void *) skb->cb;
2261 scb->expect = hlen;
2262 scb->pkt_type = type;
2263
2264 skb->dev = (void *) hdev;
2265 hdev->reassembly[index] = skb;
2266 }
2267
2268 while (count) {
2269 scb = (void *) skb->cb;
2270 len = min_t(uint, scb->expect, count);
2271
2272 memcpy(skb_put(skb, len), data, len);
2273
2274 count -= len;
2275 data += len;
2276 scb->expect -= len;
2277 remain = count;
2278
2279 switch (type) {
2280 case HCI_EVENT_PKT:
2281 if (skb->len == HCI_EVENT_HDR_SIZE) {
2282 struct hci_event_hdr *h = hci_event_hdr(skb);
2283 scb->expect = h->plen;
2284
2285 if (skb_tailroom(skb) < scb->expect) {
2286 kfree_skb(skb);
2287 hdev->reassembly[index] = NULL;
2288 return -ENOMEM;
2289 }
2290 }
2291 break;
2292
2293 case HCI_ACLDATA_PKT:
2294 if (skb->len == HCI_ACL_HDR_SIZE) {
2295 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2296 scb->expect = __le16_to_cpu(h->dlen);
2297
2298 if (skb_tailroom(skb) < scb->expect) {
2299 kfree_skb(skb);
2300 hdev->reassembly[index] = NULL;
2301 return -ENOMEM;
2302 }
2303 }
2304 break;
2305
2306 case HCI_SCODATA_PKT:
2307 if (skb->len == HCI_SCO_HDR_SIZE) {
2308 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2309 scb->expect = h->dlen;
2310
2311 if (skb_tailroom(skb) < scb->expect) {
2312 kfree_skb(skb);
2313 hdev->reassembly[index] = NULL;
2314 return -ENOMEM;
2315 }
2316 }
2317 break;
2318 }
2319
2320 if (scb->expect == 0) {
2321 /* Complete frame */
2322
2323 bt_cb(skb)->pkt_type = type;
2324 hci_recv_frame(skb);
2325
2326 hdev->reassembly[index] = NULL;
2327 return remain;
2328 }
2329 }
2330
2331 return remain;
2332}
2333
2334int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2335{
2336 int rem = 0;
2337
2338 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2339 return -EILSEQ;
2340
2341 while (count) {
2342 rem = hci_reassembly(hdev, type, data, count, type - 1);
2343 if (rem < 0)
2344 return rem;
2345
2346 data += (count - rem);
2347 count = rem;
2348 }
2349
2350 return rem;
2351}
2352EXPORT_SYMBOL(hci_recv_fragment);
2353
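/* Stream transports (e.g. UART-based drivers) deliver a byte stream in
 * which each HCI packet is preceded by a one-byte packet type indicator.
 * A single reassembly slot is reserved for this mode; the indicator byte
 * is consumed here and the rest of the frame is fed to hci_reassembly().
 */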
2354#define STREAM_REASSEMBLY 0
2355
2356int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2357{
2358 int type;
2359 int rem = 0;
2360
2361 while (count) {
2362 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2363
2364 if (!skb) {
2365 struct { char type; } *pkt;
2366
2367 /* Start of the frame */
2368 pkt = data;
2369 type = pkt->type;
2370
2371 data++;
2372 count--;
2373 } else
2374 type = bt_cb(skb)->pkt_type;
2375
2376 rem = hci_reassembly(hdev, type, data, count,
2377 STREAM_REASSEMBLY);
2378 if (rem < 0)
2379 return rem;
2380
2381 data += (count - rem);
2382 count = rem;
2383 }
2384
2385 return rem;
2386}
2387EXPORT_SYMBOL(hci_recv_stream_fragment);
2388
2389/* ---- Interface to upper protocols ---- */
2390
2391int hci_register_cb(struct hci_cb *cb)
2392{
2393 BT_DBG("%p name %s", cb, cb->name);
2394
2395 write_lock(&hci_cb_list_lock);
2396 list_add(&cb->list, &hci_cb_list);
2397 write_unlock(&hci_cb_list_lock);
2398
2399 return 0;
2400}
2401EXPORT_SYMBOL(hci_register_cb);
2402
2403int hci_unregister_cb(struct hci_cb *cb)
2404{
2405 BT_DBG("%p name %s", cb, cb->name);
2406
2407 write_lock(&hci_cb_list_lock);
2408 list_del(&cb->list);
2409 write_unlock(&hci_cb_list_lock);
2410
2411 return 0;
2412}
2413EXPORT_SYMBOL(hci_unregister_cb);
2414
2415static int hci_send_frame(struct sk_buff *skb)
2416{
2417 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2418
2419 if (!hdev) {
2420 kfree_skb(skb);
2421 return -ENODEV;
2422 }
2423
2424 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2425
2426 /* Time stamp */
2427 __net_timestamp(skb);
2428
2429 /* Send copy to monitor */
2430 hci_send_to_monitor(hdev, skb);
2431
2432 if (atomic_read(&hdev->promisc)) {
2433 /* Send copy to the sockets */
2434 hci_send_to_sock(hdev, skb);
2435 }
2436
2437 /* Get rid of skb owner, prior to sending to the driver. */
2438 skb_orphan(skb);
2439
2440 return hdev->send(skb);
2441}
2442
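/* HCI request framework: commands queued on req->cmd_q with hci_req_add()
 * are submitted as one unit by hci_req_run(), which tags the last command
 * with the request's completion callback and splices the whole batch onto
 * hdev->cmd_q for the command work to send.
 */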
2443void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2444{
2445 skb_queue_head_init(&req->cmd_q);
2446 req->hdev = hdev;
2447 req->err = 0;
2448}
2449
2450int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2451{
2452 struct hci_dev *hdev = req->hdev;
2453 struct sk_buff *skb;
2454 unsigned long flags;
2455
2456 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2457
2458	/* If an error occurred during request building, remove all HCI
2459 * commands queued on the HCI request queue.
2460 */
2461 if (req->err) {
2462 skb_queue_purge(&req->cmd_q);
2463 return req->err;
2464 }
2465
2466 /* Do not allow empty requests */
2467 if (skb_queue_empty(&req->cmd_q))
2468 return -ENODATA;
2469
2470 skb = skb_peek_tail(&req->cmd_q);
2471 bt_cb(skb)->req.complete = complete;
2472
2473 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2474 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2475 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2476
2477 queue_work(hdev->workqueue, &hdev->cmd_work);
2478
2479 return 0;
2480}
2481
2482static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2483 u32 plen, void *param)
2484{
2485 int len = HCI_COMMAND_HDR_SIZE + plen;
2486 struct hci_command_hdr *hdr;
2487 struct sk_buff *skb;
2488
2489 skb = bt_skb_alloc(len, GFP_ATOMIC);
2490 if (!skb)
2491 return NULL;
2492
2493 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2494 hdr->opcode = cpu_to_le16(opcode);
2495 hdr->plen = plen;
2496
2497 if (plen)
2498 memcpy(skb_put(skb, plen), param, plen);
2499
2500 BT_DBG("skb len %d", skb->len);
2501
2502 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2503 skb->dev = (void *) hdev;
2504
2505 return skb;
2506}
2507
2508/* Send HCI command */
2509int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2510{
2511 struct sk_buff *skb;
2512
2513 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2514
2515 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2516 if (!skb) {
2517 BT_ERR("%s no memory for command", hdev->name);
2518 return -ENOMEM;
2519 }
2520
2521	/* Stand-alone HCI commands must be flagged as
2522 * single-command requests.
2523 */
2524 bt_cb(skb)->req.start = true;
2525
2526 skb_queue_tail(&hdev->cmd_q, skb);
2527 queue_work(hdev->workqueue, &hdev->cmd_work);
2528
2529 return 0;
2530}
2531
2532/* Queue a command to an asynchronous HCI request */
2533void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2534{
2535 struct hci_dev *hdev = req->hdev;
2536 struct sk_buff *skb;
2537
2538 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2539
2540	/* If an error occurred during request building, there is no point in
2541 * queueing the HCI command. We can simply return.
2542 */
2543 if (req->err)
2544 return;
2545
2546 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2547 if (!skb) {
2548 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2549 hdev->name, opcode);
2550 req->err = -ENOMEM;
2551 return;
2552 }
2553
2554 if (skb_queue_empty(&req->cmd_q))
2555 bt_cb(skb)->req.start = true;
2556
2557 skb_queue_tail(&req->cmd_q, skb);
2558}
2559
2560/* Get data from the previously sent command */
2561void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2562{
2563 struct hci_command_hdr *hdr;
2564
2565 if (!hdev->sent_cmd)
2566 return NULL;
2567
2568 hdr = (void *) hdev->sent_cmd->data;
2569
2570 if (hdr->opcode != cpu_to_le16(opcode))
2571 return NULL;
2572
2573 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2574
2575 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2576}
2577
2578/* Send ACL data */
2579static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2580{
2581 struct hci_acl_hdr *hdr;
2582 int len = skb->len;
2583
2584 skb_push(skb, HCI_ACL_HDR_SIZE);
2585 skb_reset_transport_header(skb);
2586 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2587 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2588 hdr->dlen = cpu_to_le16(len);
2589}
2590
2591static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2592 struct sk_buff *skb, __u16 flags)
2593{
2594 struct hci_conn *conn = chan->conn;
2595 struct hci_dev *hdev = conn->hdev;
2596 struct sk_buff *list;
2597
2598 skb->len = skb_headlen(skb);
2599 skb->data_len = 0;
2600
2601 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2602
2603 switch (hdev->dev_type) {
2604 case HCI_BREDR:
2605 hci_add_acl_hdr(skb, conn->handle, flags);
2606 break;
2607 case HCI_AMP:
2608 hci_add_acl_hdr(skb, chan->handle, flags);
2609 break;
2610 default:
2611 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2612 return;
2613 }
2614
2615 list = skb_shinfo(skb)->frag_list;
2616 if (!list) {
2617		/* Non-fragmented */
2618 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2619
2620 skb_queue_tail(queue, skb);
2621 } else {
2622 /* Fragmented */
2623 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2624
2625 skb_shinfo(skb)->frag_list = NULL;
2626
2627 /* Queue all fragments atomically */
2628 spin_lock(&queue->lock);
2629
2630 __skb_queue_tail(queue, skb);
2631
2632 flags &= ~ACL_START;
2633 flags |= ACL_CONT;
2634 do {
2635 skb = list; list = list->next;
2636
2637 skb->dev = (void *) hdev;
2638 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2639 hci_add_acl_hdr(skb, conn->handle, flags);
2640
2641 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2642
2643 __skb_queue_tail(queue, skb);
2644 } while (list);
2645
2646 spin_unlock(&queue->lock);
2647 }
2648}
2649
2650void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2651{
2652 struct hci_dev *hdev = chan->conn->hdev;
2653
2654 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2655
2656 skb->dev = (void *) hdev;
2657
2658 hci_queue_acl(chan, &chan->data_q, skb, flags);
2659
2660 queue_work(hdev->workqueue, &hdev->tx_work);
2661}
2662
2663/* Send SCO data */
2664void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2665{
2666 struct hci_dev *hdev = conn->hdev;
2667 struct hci_sco_hdr hdr;
2668
2669 BT_DBG("%s len %d", hdev->name, skb->len);
2670
2671 hdr.handle = cpu_to_le16(conn->handle);
2672 hdr.dlen = skb->len;
2673
2674 skb_push(skb, HCI_SCO_HDR_SIZE);
2675 skb_reset_transport_header(skb);
2676 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2677
2678 skb->dev = (void *) hdev;
2679 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2680
2681 skb_queue_tail(&conn->data_q, skb);
2682 queue_work(hdev->workqueue, &hdev->tx_work);
2683}
2684
2685/* ---- HCI TX task (outgoing data) ---- */
2686
2687/* HCI Connection scheduler */
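/* Pick the connection of the given link type that has the fewest packets
 * in flight (c->sent) and still has data queued, and derive a fair send
 * quota from the controller's available buffer count divided by the
 * number of competing connections, with a minimum of one packet.
 */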
2688static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2689 int *quote)
2690{
2691 struct hci_conn_hash *h = &hdev->conn_hash;
2692 struct hci_conn *conn = NULL, *c;
2693 unsigned int num = 0, min = ~0;
2694
2695 /* We don't have to lock device here. Connections are always
2696 * added and removed with TX task disabled. */
2697
2698 rcu_read_lock();
2699
2700 list_for_each_entry_rcu(c, &h->list, list) {
2701 if (c->type != type || skb_queue_empty(&c->data_q))
2702 continue;
2703
2704 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2705 continue;
2706
2707 num++;
2708
2709 if (c->sent < min) {
2710 min = c->sent;
2711 conn = c;
2712 }
2713
2714 if (hci_conn_num(hdev, type) == num)
2715 break;
2716 }
2717
2718 rcu_read_unlock();
2719
2720 if (conn) {
2721 int cnt, q;
2722
2723 switch (conn->type) {
2724 case ACL_LINK:
2725 cnt = hdev->acl_cnt;
2726 break;
2727 case SCO_LINK:
2728 case ESCO_LINK:
2729 cnt = hdev->sco_cnt;
2730 break;
2731 case LE_LINK:
2732 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2733 break;
2734 default:
2735 cnt = 0;
2736 BT_ERR("Unknown link type");
2737 }
2738
2739 q = cnt / num;
2740 *quote = q ? q : 1;
2741 } else
2742 *quote = 0;
2743
2744 BT_DBG("conn %p quote %d", conn, *quote);
2745 return conn;
2746}
2747
2748static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2749{
2750 struct hci_conn_hash *h = &hdev->conn_hash;
2751 struct hci_conn *c;
2752
2753 BT_ERR("%s link tx timeout", hdev->name);
2754
2755 rcu_read_lock();
2756
2757 /* Kill stalled connections */
2758 list_for_each_entry_rcu(c, &h->list, list) {
2759 if (c->type == type && c->sent) {
2760 BT_ERR("%s killing stalled connection %pMR",
2761 hdev->name, &c->dst);
2762 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2763 }
2764 }
2765
2766 rcu_read_unlock();
2767}
2768
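/* Channel-level scheduler: among all channels of the given link type,
 * consider only those whose head-of-queue skb carries the highest pending
 * priority, and of those pick the channel whose connection has the fewest
 * packets in flight. The quota is again available buffers divided by the
 * number of competing channels, with a minimum of one packet.
 */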
2769static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2770 int *quote)
2771{
2772 struct hci_conn_hash *h = &hdev->conn_hash;
2773 struct hci_chan *chan = NULL;
2774 unsigned int num = 0, min = ~0, cur_prio = 0;
2775 struct hci_conn *conn;
2776 int cnt, q, conn_num = 0;
2777
2778 BT_DBG("%s", hdev->name);
2779
2780 rcu_read_lock();
2781
2782 list_for_each_entry_rcu(conn, &h->list, list) {
2783 struct hci_chan *tmp;
2784
2785 if (conn->type != type)
2786 continue;
2787
2788 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2789 continue;
2790
2791 conn_num++;
2792
2793 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2794 struct sk_buff *skb;
2795
2796 if (skb_queue_empty(&tmp->data_q))
2797 continue;
2798
2799 skb = skb_peek(&tmp->data_q);
2800 if (skb->priority < cur_prio)
2801 continue;
2802
2803 if (skb->priority > cur_prio) {
2804 num = 0;
2805 min = ~0;
2806 cur_prio = skb->priority;
2807 }
2808
2809 num++;
2810
2811 if (conn->sent < min) {
2812 min = conn->sent;
2813 chan = tmp;
2814 }
2815 }
2816
2817 if (hci_conn_num(hdev, type) == conn_num)
2818 break;
2819 }
2820
2821 rcu_read_unlock();
2822
2823 if (!chan)
2824 return NULL;
2825
2826 switch (chan->conn->type) {
2827 case ACL_LINK:
2828 cnt = hdev->acl_cnt;
2829 break;
2830 case AMP_LINK:
2831 cnt = hdev->block_cnt;
2832 break;
2833 case SCO_LINK:
2834 case ESCO_LINK:
2835 cnt = hdev->sco_cnt;
2836 break;
2837 case LE_LINK:
2838 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2839 break;
2840 default:
2841 cnt = 0;
2842 BT_ERR("Unknown link type");
2843 }
2844
2845 q = cnt / num;
2846 *quote = q ? q : 1;
2847 BT_DBG("chan %p quote %d", chan, *quote);
2848 return chan;
2849}
2850
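/* After a scheduling round, raise the head-of-queue priority of channels
 * that did not get to send anything up to HCI_PRIO_MAX - 1, so that
 * lower-priority traffic cannot be starved indefinitely by busy
 * higher-priority channels.
 */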
2851static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2852{
2853 struct hci_conn_hash *h = &hdev->conn_hash;
2854 struct hci_conn *conn;
2855 int num = 0;
2856
2857 BT_DBG("%s", hdev->name);
2858
2859 rcu_read_lock();
2860
2861 list_for_each_entry_rcu(conn, &h->list, list) {
2862 struct hci_chan *chan;
2863
2864 if (conn->type != type)
2865 continue;
2866
2867 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2868 continue;
2869
2870 num++;
2871
2872 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2873 struct sk_buff *skb;
2874
2875 if (chan->sent) {
2876 chan->sent = 0;
2877 continue;
2878 }
2879
2880 if (skb_queue_empty(&chan->data_q))
2881 continue;
2882
2883 skb = skb_peek(&chan->data_q);
2884 if (skb->priority >= HCI_PRIO_MAX - 1)
2885 continue;
2886
2887 skb->priority = HCI_PRIO_MAX - 1;
2888
2889 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2890 skb->priority);
2891 }
2892
2893 if (hci_conn_num(hdev, type) == num)
2894 break;
2895 }
2896
2897 rcu_read_unlock();
2898
2899}
2900
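/* Block-based flow control (the mode used when hci_sched_acl() selects
 * hci_sched_acl_blk(), typically on AMP controllers): each ACL packet
 * consumes a number of fixed-size buffer blocks, accounted against
 * hdev->block_cnt instead of a per-packet counter.
 */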
2901static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2902{
2903 /* Calculate count of blocks used by this packet */
2904 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2905}
2906
2907static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2908{
2909 if (!test_bit(HCI_RAW, &hdev->flags)) {
2910 /* ACL tx timeout must be longer than maximum
2911 * link supervision timeout (40.9 seconds) */
2912 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2913 HCI_ACL_TX_TIMEOUT))
2914 hci_link_tx_to(hdev, ACL_LINK);
2915 }
2916}
2917
2918static void hci_sched_acl_pkt(struct hci_dev *hdev)
2919{
2920 unsigned int cnt = hdev->acl_cnt;
2921 struct hci_chan *chan;
2922 struct sk_buff *skb;
2923 int quote;
2924
2925 __check_timeout(hdev, cnt);
2926
2927 while (hdev->acl_cnt &&
2928 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2929 u32 priority = (skb_peek(&chan->data_q))->priority;
2930 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2931 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2932 skb->len, skb->priority);
2933
2934 /* Stop if priority has changed */
2935 if (skb->priority < priority)
2936 break;
2937
2938 skb = skb_dequeue(&chan->data_q);
2939
2940 hci_conn_enter_active_mode(chan->conn,
2941 bt_cb(skb)->force_active);
2942
2943 hci_send_frame(skb);
2944 hdev->acl_last_tx = jiffies;
2945
2946 hdev->acl_cnt--;
2947 chan->sent++;
2948 chan->conn->sent++;
2949 }
2950 }
2951
2952 if (cnt != hdev->acl_cnt)
2953 hci_prio_recalculate(hdev, ACL_LINK);
2954}
2955
2956static void hci_sched_acl_blk(struct hci_dev *hdev)
2957{
2958 unsigned int cnt = hdev->block_cnt;
2959 struct hci_chan *chan;
2960 struct sk_buff *skb;
2961 int quote;
2962 u8 type;
2963
2964 __check_timeout(hdev, cnt);
2965
2966 BT_DBG("%s", hdev->name);
2967
2968 if (hdev->dev_type == HCI_AMP)
2969 type = AMP_LINK;
2970 else
2971 type = ACL_LINK;
2972
2973 while (hdev->block_cnt > 0 &&
2974 (chan = hci_chan_sent(hdev, type, &quote))) {
2975 u32 priority = (skb_peek(&chan->data_q))->priority;
2976 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2977 int blocks;
2978
2979 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2980 skb->len, skb->priority);
2981
2982 /* Stop if priority has changed */
2983 if (skb->priority < priority)
2984 break;
2985
2986 skb = skb_dequeue(&chan->data_q);
2987
2988 blocks = __get_blocks(hdev, skb);
2989 if (blocks > hdev->block_cnt)
2990 return;
2991
2992 hci_conn_enter_active_mode(chan->conn,
2993 bt_cb(skb)->force_active);
2994
2995 hci_send_frame(skb);
2996 hdev->acl_last_tx = jiffies;
2997
2998 hdev->block_cnt -= blocks;
2999 quote -= blocks;
3000
3001 chan->sent += blocks;
3002 chan->conn->sent += blocks;
3003 }
3004 }
3005
3006 if (cnt != hdev->block_cnt)
3007 hci_prio_recalculate(hdev, type);
3008}
3009
3010static void hci_sched_acl(struct hci_dev *hdev)
3011{
3012 BT_DBG("%s", hdev->name);
3013
3014 /* No ACL link over BR/EDR controller */
3015 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3016 return;
3017
3018 /* No AMP link over AMP controller */
3019 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3020 return;
3021
3022 switch (hdev->flow_ctl_mode) {
3023 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3024 hci_sched_acl_pkt(hdev);
3025 break;
3026
3027 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3028 hci_sched_acl_blk(hdev);
3029 break;
3030 }
3031}
3032
3033/* Schedule SCO */
3034static void hci_sched_sco(struct hci_dev *hdev)
3035{
3036 struct hci_conn *conn;
3037 struct sk_buff *skb;
3038 int quote;
3039
3040 BT_DBG("%s", hdev->name);
3041
3042 if (!hci_conn_num(hdev, SCO_LINK))
3043 return;
3044
3045 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3046 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3047 BT_DBG("skb %p len %d", skb, skb->len);
3048 hci_send_frame(skb);
3049
3050 conn->sent++;
3051 if (conn->sent == ~0)
3052 conn->sent = 0;
3053 }
3054 }
3055}
3056
3057static void hci_sched_esco(struct hci_dev *hdev)
3058{
3059 struct hci_conn *conn;
3060 struct sk_buff *skb;
3061 int quote;
3062
3063 BT_DBG("%s", hdev->name);
3064
3065 if (!hci_conn_num(hdev, ESCO_LINK))
3066 return;
3067
3068 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3069 &quote))) {
3070 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3071 BT_DBG("skb %p len %d", skb, skb->len);
3072 hci_send_frame(skb);
3073
3074 conn->sent++;
3075 if (conn->sent == ~0)
3076 conn->sent = 0;
3077 }
3078 }
3079}
3080
3081static void hci_sched_le(struct hci_dev *hdev)
3082{
3083 struct hci_chan *chan;
3084 struct sk_buff *skb;
3085 int quote, cnt, tmp;
3086
3087 BT_DBG("%s", hdev->name);
3088
3089 if (!hci_conn_num(hdev, LE_LINK))
3090 return;
3091
3092 if (!test_bit(HCI_RAW, &hdev->flags)) {
3093 /* LE tx timeout must be longer than maximum
3094 * link supervision timeout (40.9 seconds) */
3095 if (!hdev->le_cnt && hdev->le_pkts &&
3096 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3097 hci_link_tx_to(hdev, LE_LINK);
3098 }
3099
3100 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3101 tmp = cnt;
3102 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3103 u32 priority = (skb_peek(&chan->data_q))->priority;
3104 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3105 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3106 skb->len, skb->priority);
3107
3108 /* Stop if priority has changed */
3109 if (skb->priority < priority)
3110 break;
3111
3112 skb = skb_dequeue(&chan->data_q);
3113
3114 hci_send_frame(skb);
3115 hdev->le_last_tx = jiffies;
3116
3117 cnt--;
3118 chan->sent++;
3119 chan->conn->sent++;
3120 }
3121 }
3122
3123 if (hdev->le_pkts)
3124 hdev->le_cnt = cnt;
3125 else
3126 hdev->acl_cnt = cnt;
3127
3128 if (cnt != tmp)
3129 hci_prio_recalculate(hdev, LE_LINK);
3130}
3131
3132static void hci_tx_work(struct work_struct *work)
3133{
3134 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3135 struct sk_buff *skb;
3136
3137 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3138 hdev->sco_cnt, hdev->le_cnt);
3139
3140 /* Schedule queues and send stuff to HCI driver */
3141
3142 hci_sched_acl(hdev);
3143
3144 hci_sched_sco(hdev);
3145
3146 hci_sched_esco(hdev);
3147
3148 hci_sched_le(hdev);
3149
3150 /* Send next queued raw (unknown type) packet */
3151 while ((skb = skb_dequeue(&hdev->raw_q)))
3152 hci_send_frame(skb);
3153}
3154
3155/* ----- HCI RX task (incoming data processing) ----- */
3156
3157/* ACL data packet */
3158static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3159{
3160 struct hci_acl_hdr *hdr = (void *) skb->data;
3161 struct hci_conn *conn;
3162 __u16 handle, flags;
3163
3164 skb_pull(skb, HCI_ACL_HDR_SIZE);
3165
3166 handle = __le16_to_cpu(hdr->handle);
3167 flags = hci_flags(handle);
3168 handle = hci_handle(handle);
3169
3170 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3171 handle, flags);
3172
3173 hdev->stat.acl_rx++;
3174
3175 hci_dev_lock(hdev);
3176 conn = hci_conn_hash_lookup_handle(hdev, handle);
3177 hci_dev_unlock(hdev);
3178
3179 if (conn) {
3180 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3181
3182 /* Send to upper protocol */
3183 l2cap_recv_acldata(conn, skb, flags);
3184 return;
3185 } else {
3186 BT_ERR("%s ACL packet for unknown connection handle %d",
3187 hdev->name, handle);
3188 }
3189
3190 kfree_skb(skb);
3191}
3192
3193/* SCO data packet */
3194static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3195{
3196 struct hci_sco_hdr *hdr = (void *) skb->data;
3197 struct hci_conn *conn;
3198 __u16 handle;
3199
3200 skb_pull(skb, HCI_SCO_HDR_SIZE);
3201
3202 handle = __le16_to_cpu(hdr->handle);
3203
3204 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3205
3206 hdev->stat.sco_rx++;
3207
3208 hci_dev_lock(hdev);
3209 conn = hci_conn_hash_lookup_handle(hdev, handle);
3210 hci_dev_unlock(hdev);
3211
3212 if (conn) {
3213 /* Send to upper protocol */
3214 sco_recv_scodata(conn, skb);
3215 return;
3216 } else {
3217 BT_ERR("%s SCO packet for unknown connection handle %d",
3218 hdev->name, handle);
3219 }
3220
3221 kfree_skb(skb);
3222}
3223
3224static bool hci_req_is_complete(struct hci_dev *hdev)
3225{
3226 struct sk_buff *skb;
3227
3228 skb = skb_peek(&hdev->cmd_q);
3229 if (!skb)
3230 return true;
3231
3232 return bt_cb(skb)->req.start;
3233}
3234
3235static void hci_resend_last(struct hci_dev *hdev)
3236{
3237 struct hci_command_hdr *sent;
3238 struct sk_buff *skb;
3239 u16 opcode;
3240
3241 if (!hdev->sent_cmd)
3242 return;
3243
3244 sent = (void *) hdev->sent_cmd->data;
3245 opcode = __le16_to_cpu(sent->opcode);
3246 if (opcode == HCI_OP_RESET)
3247 return;
3248
3249 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3250 if (!skb)
3251 return;
3252
3253 skb_queue_head(&hdev->cmd_q, skb);
3254 queue_work(hdev->workqueue, &hdev->cmd_work);
3255}
3256
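/* Called when a command completes. If the command was part of a request,
 * the request's completion callback is located either in hdev->sent_cmd
 * (last command of the request) or by flushing the remaining queued
 * commands of that request from hdev->cmd_q, and is then invoked with
 * the final status.
 */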
3257void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3258{
3259 hci_req_complete_t req_complete = NULL;
3260 struct sk_buff *skb;
3261 unsigned long flags;
3262
3263 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3264
3265 /* If the completed command doesn't match the last one that was
3266 * sent we need to do special handling of it.
3267 */
3268 if (!hci_sent_cmd_data(hdev, opcode)) {
3269 /* Some CSR based controllers generate a spontaneous
3270 * reset complete event during init and any pending
3271 * command will never be completed. In such a case we
3272 * need to resend whatever was the last sent
3273 * command.
3274 */
3275 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3276 hci_resend_last(hdev);
3277
3278 return;
3279 }
3280
3281 /* If the command succeeded and there's still more commands in
3282 * this request the request is not yet complete.
3283 */
3284 if (!status && !hci_req_is_complete(hdev))
3285 return;
3286
3287 /* If this was the last command in a request the complete
3288 * callback would be found in hdev->sent_cmd instead of the
3289 * command queue (hdev->cmd_q).
3290 */
3291 if (hdev->sent_cmd) {
3292 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3293 if (req_complete)
3294 goto call_complete;
3295 }
3296
3297 /* Remove all pending commands belonging to this request */
3298 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3299 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3300 if (bt_cb(skb)->req.start) {
3301 __skb_queue_head(&hdev->cmd_q, skb);
3302 break;
3303 }
3304
3305 req_complete = bt_cb(skb)->req.complete;
3306 kfree_skb(skb);
3307 }
3308 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3309
3310call_complete:
3311 if (req_complete)
3312 req_complete(hdev, status);
3313}
3314
3315void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3316{
3317 hci_req_complete_t req_complete = NULL;
3318
3319 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3320
3321 if (status) {
3322 hci_req_cmd_complete(hdev, opcode, status);
3323 return;
3324 }
3325
3326 /* No need to handle success status if there are more commands */
3327 if (!hci_req_is_complete(hdev))
3328 return;
3329
3330 if (hdev->sent_cmd)
3331 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3332
3333 /* If the request doesn't have a complete callback or there
3334 * are other commands/requests in the hdev queue we consider
3335 * this request as completed.
3336 */
3337 if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3338 hci_req_cmd_complete(hdev, opcode, status);
3339}
3340
3341static void hci_rx_work(struct work_struct *work)
3342{
3343 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3344 struct sk_buff *skb;
3345
3346 BT_DBG("%s", hdev->name);
3347
3348 while ((skb = skb_dequeue(&hdev->rx_q))) {
3349 /* Send copy to monitor */
3350 hci_send_to_monitor(hdev, skb);
3351
3352 if (atomic_read(&hdev->promisc)) {
3353 /* Send copy to the sockets */
3354 hci_send_to_sock(hdev, skb);
3355 }
3356
3357 if (test_bit(HCI_RAW, &hdev->flags)) {
3358 kfree_skb(skb);
3359 continue;
3360 }
3361
3362 if (test_bit(HCI_INIT, &hdev->flags)) {
3363			/* Don't process data packets in this state. */
3364 switch (bt_cb(skb)->pkt_type) {
3365 case HCI_ACLDATA_PKT:
3366 case HCI_SCODATA_PKT:
3367 kfree_skb(skb);
3368 continue;
3369 }
3370 }
3371
3372 /* Process frame */
3373 switch (bt_cb(skb)->pkt_type) {
3374 case HCI_EVENT_PKT:
3375 BT_DBG("%s Event packet", hdev->name);
3376 hci_event_packet(hdev, skb);
3377 break;
3378
3379 case HCI_ACLDATA_PKT:
3380 BT_DBG("%s ACL data packet", hdev->name);
3381 hci_acldata_packet(hdev, skb);
3382 break;
3383
3384 case HCI_SCODATA_PKT:
3385 BT_DBG("%s SCO data packet", hdev->name);
3386 hci_scodata_packet(hdev, skb);
3387 break;
3388
3389 default:
3390 kfree_skb(skb);
3391 break;
3392 }
3393 }
3394}
3395
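/* Command work: sends the next queued HCI command while the controller
 * still has command credits (hdev->cmd_cnt). A clone of the command is
 * kept in hdev->sent_cmd for matching the completion event, and cmd_timer
 * is armed so a controller that never answers can be detected.
 */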
3396static void hci_cmd_work(struct work_struct *work)
3397{
3398 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3399 struct sk_buff *skb;
3400
3401 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3402 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3403
3404 /* Send queued commands */
3405 if (atomic_read(&hdev->cmd_cnt)) {
3406 skb = skb_dequeue(&hdev->cmd_q);
3407 if (!skb)
3408 return;
3409
3410 kfree_skb(hdev->sent_cmd);
3411
3412 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3413 if (hdev->sent_cmd) {
3414 atomic_dec(&hdev->cmd_cnt);
3415 hci_send_frame(skb);
3416 if (test_bit(HCI_RESET, &hdev->flags))
3417 del_timer(&hdev->cmd_timer);
3418 else
3419 mod_timer(&hdev->cmd_timer,
3420 jiffies + HCI_CMD_TIMEOUT);
3421 } else {
3422 skb_queue_head(&hdev->cmd_q, skb);
3423 queue_work(hdev->workqueue, &hdev->cmd_work);
3424 }
3425 }
3426}
3427
3428int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3429{
3430 /* General inquiry access code (GIAC) */
3431 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3432 struct hci_cp_inquiry cp;
3433
3434 BT_DBG("%s", hdev->name);
3435
3436 if (test_bit(HCI_INQUIRY, &hdev->flags))
3437 return -EINPROGRESS;
3438
3439 inquiry_cache_flush(hdev);
3440
3441 memset(&cp, 0, sizeof(cp));
3442 memcpy(&cp.lap, lap, sizeof(cp.lap));
3443 cp.length = length;
3444
3445 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3446}
3447
3448int hci_cancel_inquiry(struct hci_dev *hdev)
3449{
3450 BT_DBG("%s", hdev->name);
3451
3452 if (!test_bit(HCI_INQUIRY, &hdev->flags))
3453 return -EALREADY;
3454
3455 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3456}
3457
3458u8 bdaddr_to_le(u8 bdaddr_type)
3459{
3460 switch (bdaddr_type) {
3461 case BDADDR_LE_PUBLIC:
3462 return ADDR_LE_DEV_PUBLIC;
3463
3464 default:
3465 /* Fallback to LE Random address type */
3466 return ADDR_LE_DEV_RANDOM;
3467 }
3468}