Bluetooth: Perform Class of Device changes through hdev->req_workqueue
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
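
/* Usage sketch (editor's illustration, not part of the original file):
 * a caller builds a request on the stack, queues one or more commands
 * and hands the whole batch to the command work via hci_req_run(). The
 * completion callback runs once the last command in the batch completes.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		BT_DBG("%s opcode 0x%4.4x status 0x%2.2x", hdev->name,
 *		       opcode, status);
 *	}
 *
 *	static int example_send(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *		u8 enable = 0x00;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
 *			    &enable);
 *		return hci_req_run(&req, example_complete);
 *	}
 */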

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
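
/* Usage sketch (editor's illustration, not part of the original file):
 * __hci_cmd_sync() sends a single command and blocks for the matching
 * completion event, returning the event parameters as an skb that the
 * caller must free. Callers are expected to serialize against other
 * requests (e.g. via hci_req_sync_lock()).
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	skb->data now holds the response parameters
 *	(struct hci_rp_read_local_name in this case).
 *
 *	kfree_skb(skb);
 */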

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
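
/* Usage sketch (editor's illustration, not part of the original file):
 * hci_req_sync() takes a request-builder callback, runs the resulting
 * request and blocks until it completes. The same pattern is used by the
 * work handlers further down in this file (e.g. scan_update_work()).
 *
 *	static int example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	...
 *	u8 status;
 *
 *	hci_req_sync(hdev, example_req, SCAN_PAGE, HCI_CMD_TIMEOUT,
 *		     &status);
 */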

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
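
/* Illustrative note (editor's addition, not part of the original file):
 * the skb built above carries the standard HCI command packet layout, a
 * 3 byte header followed by plen bytes of parameters. The 16-bit opcode
 * is encoded little-endian and packs OGF and OCF as
 * opcode = (OGF << 10) | OCF. For example:
 *
 *	HCI_OP_LE_SET_SCAN_ENABLE = 0x200c   (OGF 0x08, OCF 0x000c)
 *
 *	byte 0: 0x0c   opcode LSB
 *	byte 1: 0x20   opcode MSB
 *	byte 2: plen
 *	byte 3+: parameters
 */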

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither applies, default to the global
	 * settings, represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = get_current_adv_instance(hdev);
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
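
/* Usage sketch (editor's illustration, not part of the original file):
 * enabling advertising is normally done as part of a larger request so
 * that random address, advertising data and parameters land on the
 * controller in one serialized batch, e.g.:
 *
 *	hci_req_init(&req, hdev);
 *	__hci_req_update_adv_data(&req, HCI_ADV_CURRENT);
 *	__hci_req_update_scan_rsp_data(&req, HCI_ADV_CURRENT);
 *	__hci_req_enable_advertising(&req);
 *	hci_req_run(&req, NULL);
 *
 * This mirrors what hci_req_reenable_advertising() below does for the
 * instance-0 case.
 */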

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}

static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_scan_rsp_data(req, instance);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

void __hci_req_update_adv_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_adv_data(req, instance);
}

int hci_req_update_adv_data(struct hci_dev *hdev, int instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 instance;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	instance = get_current_adv_instance(hdev);

	hci_req_init(&req, hdev);

	if (instance) {
		__hci_req_schedule_adv_instance(&req, instance, true);
	} else {
		__hci_req_update_adv_data(&req, HCI_ADV_CURRENT);
		__hci_req_update_scan_rsp_data(&req, HCI_ADV_CURRENT);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = get_current_adv_instance(hdev);
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	if (!skb_queue_empty(&req.cmd_q))
		hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a
	 * timeout in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, HCI_ADV_CURRENT);
	__hci_req_update_scan_rsp_data(req, HCI_ADV_CURRENT);
	__hci_req_enable_advertising(req);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances)) {
		hdev->cur_adv_instance = 0x00;
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
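
/* Usage sketch (editor's illustration, not part of the original file):
 * a request builder typically resolves the own-address type before
 * programming scan or advertising parameters, as in
 * hci_req_add_le_passive_scan() above:
 *
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *		return;
 *
 *	param_cp.own_address_type = own_addr_type;
 *
 * own_addr_type ends up as ADDR_LE_DEV_RANDOM when an RPA, NRPA or
 * static address is used, and ADDR_LE_DEV_PUBLIC otherwise.
 */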

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
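
/* Worked example (editor's illustration, not part of the original file):
 * the three Class of Device bytes sent above encode minor class, major
 * class and service classes. For, say, a smartphone exposing the
 * Networking and Telephony service classes:
 *
 *	cod[0] = 0x0c;	minor device class (smartphone, value 3 << 2)
 *	cod[1] = 0x02;	major device class (phone)
 *	cod[2] = 0x42;	service classes (Networking | Telephony)
 *
 * With HCI_LIMITED_DISCOVERABLE set, bit 5 of cod[1] (bit 13 of the
 * 24-bit CoD field, the Limited Discoverable Mode bit) is additionally
 * set, which is what the cod[1] |= 0x20 above implements.
 */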

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are resolving a remote device name, do not change the
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
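
/* Sketch (editor's illustration, not part of the original file): the
 * commit this snapshot belongs to moves Class of Device updates onto
 * hdev->req_workqueue using the same pattern as the work items above.
 * A minimal handler in that style would look like the following, where
 * the class_update work member and handler names are hypothetical:
 *
 *	static int class_update(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_dev_lock(req->hdev);
 *		__hci_req_update_class(req);
 *		hci_dev_unlock(req->hdev);
 *		return 0;
 *	}
 *
 *	static void class_update_work(struct work_struct *work)
 *	{
 *		struct hci_dev *hdev = container_of(work, struct hci_dev,
 *						    class_update);
 *
 *		hci_req_sync(hdev, class_update, 0, HCI_CMD_TIMEOUT, NULL);
 *	}
 *
 * It would be registered here with INIT_WORK() and queued via
 * queue_work(hdev->req_workqueue, &hdev->class_update).
 */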

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}