Bluetooth: Add mgmt support for LE Secure Connections LTK types
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Commands accepted over the mgmt interface; reported back to user
 * space by the Read Management Commands command.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
};

/* Events that may be emitted over the mgmt interface; reported back
 * to user space by the Read Management Commands command.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
129
/* How long the service cache is kept before EIR/class updates are
 * pushed to the controller (see service_cache_off()).
 */
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)

/* A mgmt command that is still awaiting completion */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index the command targets */
	void *param;		/* heap copy of the command parameters */
	struct sock *sk;	/* originating mgmt socket (reference held) */
	void *user_data;	/* opaque per-command context */
};
140
/* HCI to MGMT error code conversion table. Indexed directly by the
 * HCI status code; entries must therefore stay in HCI status order.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
205
206 static u8 mgmt_status(u8 hci_status)
207 {
208 if (hci_status < ARRAY_SIZE(mgmt_status_table))
209 return mgmt_status_table[hci_status];
210
211 return MGMT_STATUS_FAILED;
212 }
213
/* Broadcast a mgmt event to all open mgmt control sockets, optionally
 * skipping the originating socket @skip_sk. A NULL @hdev addresses
 * the event to the global MGMT_INDEX_NONE index.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	/* hci_send_to_control() clones the skb per receiver, so our
	 * reference must still be dropped afterwards.
	 */
	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
243
/* Queue a MGMT_EV_CMD_STATUS event on @sk's receive queue, reporting
 * @status for command @cmd on controller @index. Used for commands
 * that fail before producing a full reply.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* sock_queue_rcv_skb() consumes the skb on success; free it
	 * ourselves only when queueing failed.
	 */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
273
274 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
275 void *rp, size_t rp_len)
276 {
277 struct sk_buff *skb;
278 struct mgmt_hdr *hdr;
279 struct mgmt_ev_cmd_complete *ev;
280 int err;
281
282 BT_DBG("sock %p", sk);
283
284 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
285 if (!skb)
286 return -ENOMEM;
287
288 hdr = (void *) skb_put(skb, sizeof(*hdr));
289
290 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
291 hdr->index = cpu_to_le16(index);
292 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
293
294 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
295 ev->opcode = cpu_to_le16(cmd);
296 ev->status = status;
297
298 if (rp)
299 memcpy(ev->data, rp, rp_len);
300
301 err = sock_queue_rcv_skb(sk, skb);
302 if (err < 0)
303 kfree_skb(skb);
304
305 return err;
306 }
307
308 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
309 u16 data_len)
310 {
311 struct mgmt_rp_read_version rp;
312
313 BT_DBG("sock %p", sk);
314
315 rp.version = MGMT_VERSION;
316 rp.revision = cpu_to_le16(MGMT_REVISION);
317
318 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
319 sizeof(rp));
320 }
321
/* Handle the Read Management Commands command. The reply contains
 * the two counters followed by all supported command opcodes and then
 * all supported event opcodes, each as an unaligned little-endian u16.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* opcode keeps advancing across both loops: events are written
	 * directly after the commands.
	 */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
355
/* Handle the Read Controller Index List command: return the indexes
 * of all configured BR/EDR-capable controllers. Controllers still in
 * setup/config, user-channel controllers and raw-only devices are
 * excluded.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries so the
	 * reply buffer can be sized.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that actually qualify. The
	 * final count may be smaller than the allocation, which is fine;
	 * rp_len is recomputed below.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
415
/* Handle the Read Unconfigured Controller Index List command: same
 * filtering as read_index_list(), but selecting controllers that still
 * have the HCI_UNCONFIGURED flag set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries so the
	 * reply buffer can be sized.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the qualifying indexes and recompute
	 * the actual reply length.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
475
476 static bool is_configured(struct hci_dev *hdev)
477 {
478 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
479 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
480 return false;
481
482 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
483 !bacmp(&hdev->public_addr, BDADDR_ANY))
484 return false;
485
486 return true;
487 }
488
489 static __le32 get_missing_options(struct hci_dev *hdev)
490 {
491 u32 options = 0;
492
493 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
494 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
495 options |= MGMT_OPTION_EXTERNAL_CONFIG;
496
497 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
498 !bacmp(&hdev->public_addr, BDADDR_ANY))
499 options |= MGMT_OPTION_PUBLIC_ADDRESS;
500
501 return cpu_to_le32(options);
502 }
503
504 static int new_options(struct hci_dev *hdev, struct sock *skip)
505 {
506 __le32 options = get_missing_options(hdev);
507
508 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
509 sizeof(options), skip);
510 }
511
512 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
513 {
514 __le32 options = get_missing_options(hdev);
515
516 return cmd_complete(sk, hdev->id, opcode, 0, &options,
517 sizeof(options));
518 }
519
/* Handle the Read Controller Configuration Information command:
 * report the manufacturer plus which configuration options are
 * supported and which are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only possible when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
547
/* Build the bitmask of settings this controller can support, derived
 * from its LMP features, HCI version, debug flags and configuration
 * hooks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller features */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs interlaced page scan (1.2+) */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* HCI_FORCE_SC allows testing SC on controllers that
		 * do not advertise the feature.
		 */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
586
/* Build the bitmask of currently active settings by translating the
 * hdev dev_flags into their mgmt setting equivalents.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_BONDABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
635
/* PnP Information service class; excluded from the EIR UUID list */
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, writing at most @len bytes. Returns a pointer just past the
 * written data (unchanged if nothing was written). UUIDs below 0x1100
 * and the PnP Information UUID are omitted; the field type is
 * downgraded to "incomplete list" when space runs out.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Minimum: 2-byte field header plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives in bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match;
		 * uuids_start[0] is the running length byte.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
679
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data, writing at most @len bytes. Returns a pointer just past the
 * written data. The field type is downgraded to "incomplete list"
 * when space runs out.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Minimum: 2-byte field header plus one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias lives in bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
712
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data, writing at most @len bytes. Returns a pointer just past the
 * written data. The field type is downgraded to "incomplete list"
 * when space runs out.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Minimum: 2-byte field header plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
745
/* Find the first pending mgmt command on @hdev with the given opcode,
 * or NULL if none is in flight.
 */
static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}
757
758 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
759 struct hci_dev *hdev,
760 const void *data)
761 {
762 struct pending_cmd *cmd;
763
764 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
765 if (cmd->user_data != data)
766 continue;
767 if (cmd->opcode == opcode)
768 return cmd;
769 }
770
771 return NULL;
772 }
773
/* Build LE scan response data containing the local name, shortened if
 * it does not fit. Returns the number of bytes written to @ptr.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the AD length and type fields */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD length covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
799
/* Queue an HCI command updating the LE scan response data, but only
 * when LE is enabled and the data actually changed since the last
 * update.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer so future comparisons
	 * see the same bytes the controller was given.
	 */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
824
/* Return the LE advertising flags describing the current (or pending)
 * discoverable mode: LE_AD_GENERAL, LE_AD_LIMITED or 0.
 */
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 requests general, 0x02 limited discoverable */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}
848
/* Build LE advertising data: the Flags field (discoverable mode plus
 * BR/EDR-not-supported) and, when valid, the TX power level. Returns
 * the number of bytes written to @ptr.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit a Flags field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
880
/* Queue an HCI command updating the LE advertising data, but only
 * when LE is enabled and the data actually changed since the last
 * update.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer so future comparisons
	 * see the same bytes the controller was given.
	 */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
905
/* Refresh the LE advertising data and submit the resulting HCI
 * command immediately as a standalone request. Returns the value of
 * hci_req_run().
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
915
/* Assemble the extended inquiry response payload into @data: local
 * name, inquiry TX power, Device ID and the registered service UUID
 * lists. @data is expected to be pre-zeroed by the caller (see
 * update_eir()) and HCI_MAX_EIR_LENGTH bytes long.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		/* Names longer than 48 bytes are truncated and marked
		 * as shortened to leave room for the other fields.
		 */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID field: source, vendor, product, version */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists fill whatever space remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
963
/* Queue a Write Extended Inquiry Response command when EIR is
 * applicable (powered, controller-capable, SSP on, service cache not
 * active) and the data actually changed.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	/* EIR is only transmitted when Simple Pairing is enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active, updates are deferred to
	 * service_cache_off().
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
992
993 static u8 get_service_classes(struct hci_dev *hdev)
994 {
995 struct bt_uuid *uuid;
996 u8 val = 0;
997
998 list_for_each_entry(uuid, &hdev->uuids, list)
999 val |= uuid->svc_hint;
1000
1001 return val;
1002 }
1003
/* Queue a Write Class of Device command when BR/EDR is active, the
 * service cache is not in effect and the class actually changed.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active, updates are deferred to
	 * service_cache_off().
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* 0x20 is the Limited Discoverable Mode bit of the service
	 * class field.
	 */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI round-trip when nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1032
1033 static bool get_connectable(struct hci_dev *hdev)
1034 {
1035 struct pending_cmd *cmd;
1036
1037 /* If there's a pending mgmt command the flag will not yet have
1038 * it's final value, so check for this first.
1039 */
1040 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1041 if (cmd) {
1042 struct mgmt_mode *cp = cmd->param;
1043 return cp->val;
1044 }
1045
1046 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1047 }
1048
/* Queue an HCI command turning LE advertising off */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1055
/* Queue the HCI commands to (re)start LE advertising: set the
 * advertising parameters (interval, type, own address type, channel
 * map) and then enable advertising. Any currently running advertising
 * instance is disabled first so the parameters can be changed.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Never touch advertising while LE connections exist */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1096
/* Delayed work: once the service cache period (CACHE_TIMEOUT) has
 * expired, push the real EIR data and class of device to the
 * controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache was already disabled */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1117
/* Delayed work: mark the resolvable private address as expired and,
 * if advertising is enabled, restart it so a fresh RPA gets
 * generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1138
/* One-time per-controller mgmt initialization, triggered the first
 * time a mgmt socket addresses the controller: set HCI_MGMT, set up
 * the delayed work items and reset the bondable state.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Only run once per controller */
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
1154
/* Handle the Read Controller Information command: report address,
 * HCI version, manufacturer, supported/current settings, class of
 * device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1184
/* Drop the socket reference taken in mgmt_pending_add() and release the
 * copied parameter buffer plus the command itself.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1191
1192 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1193 struct hci_dev *hdev, void *data,
1194 u16 len)
1195 {
1196 struct pending_cmd *cmd;
1197
1198 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1199 if (!cmd)
1200 return NULL;
1201
1202 cmd->opcode = opcode;
1203 cmd->index = hdev->id;
1204
1205 cmd->param = kmalloc(len, GFP_KERNEL);
1206 if (!cmd->param) {
1207 kfree(cmd);
1208 return NULL;
1209 }
1210
1211 if (data)
1212 memcpy(cmd->param, data, len);
1213
1214 cmd->sk = sk;
1215 sock_hold(sk);
1216
1217 list_add(&cmd->list, &hdev->mgmt_pending);
1218
1219 return cmd;
1220 }
1221
1222 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1223 void (*cb)(struct pending_cmd *cmd,
1224 void *data),
1225 void *data)
1226 {
1227 struct pending_cmd *cmd, *tmp;
1228
1229 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1230 if (opcode > 0 && cmd->opcode != opcode)
1231 continue;
1232
1233 cb(cmd, data);
1234 }
1235 }
1236
/* Unlink a pending command from hdev->mgmt_pending and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1242
1243 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1244 {
1245 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1246
1247 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1248 sizeof(settings));
1249 }
1250
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	/* If no connections remain there is nothing left to wait for,
	 * so power off right away instead of letting the delayed
	 * power_off work expire.
	 */
	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1260
/* Queue the HCI commands needed to abort whatever discovery activity is
 * currently running. Returns true if at least one command was added to
 * @req (i.e. the caller should expect a completion).
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Cancel either the BR/EDR inquiry or the LE scan,
		 * depending on which one is active.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Abort the remote name lookup currently in progress. */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1302
/* Build and run one HCI request that quiesces the controller before a
 * power-off: disable scanning/advertising, stop discovery and tear down
 * every connection in a state-appropriate way.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Disable page and inquiry scanning if either is active. */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	/* Established links get disconnected, outgoing connection
	 * attempts get cancelled and incoming ones get rejected.
	 */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1361
/* Handle the Set Powered mgmt command. */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered command may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the controller is still in its auto-off grace period,
	 * cancel the scheduled power-off. Powering on in this state
	 * needs no HCI traffic - just report the transition.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: just confirm the settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1427
1428 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1429 {
1430 __le32 ev;
1431
1432 ev = cpu_to_le32(get_current_settings(hdev));
1433
1434 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1435 }
1436
/* Public wrapper: broadcast New Settings to every mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1441
/* Context passed to mgmt_pending_foreach() callbacks. settings_rsp()
 * stores the first responder's socket in sk (with a held reference) so
 * the caller can skip it when broadcasting a New Settings event.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1447
/* Per-command callback: answer with the current settings, then retire
 * the pending command.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	/* Remember (and hold) the first responder's socket so the
	 * caller can skip it when sending the New Settings event.
	 */
	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1463
/* Per-command callback: fail the pending command with the mgmt status
 * code pointed to by @data, then retire it.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1471
1472 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1473 {
1474 if (!lmp_bredr_capable(hdev))
1475 return MGMT_STATUS_NOT_SUPPORTED;
1476 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1477 return MGMT_STATUS_REJECTED;
1478 else
1479 return MGMT_STATUS_SUCCESS;
1480 }
1481
1482 static u8 mgmt_le_support(struct hci_dev *hdev)
1483 {
1484 if (!lmp_le_capable(hdev))
1485 return MGMT_STATUS_NOT_SUPPORTED;
1486 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1487 return MGMT_STATUS_REJECTED;
1488 else
1489 return MGMT_STATUS_SUCCESS;
1490 }
1491
/* HCI request completion handler for Set Discoverable: commit or roll
 * back the flags, reply to the requester and re-sync class/page-scan.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set optimistically in
		 * set_discoverable().
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the discoverable timeout now that the mode change
		 * has taken effect on the controller.
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	hci_update_page_scan(hdev, &req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1550
/* Handle the Set Discoverable mgmt command. */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	/* 0x00 = off, 0x01 = general discoverable, 0x02 = limited */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires the connectable setting to be on. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: only the stored flag needs to change. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1715
1716 static void write_fast_connectable(struct hci_request *req, bool enable)
1717 {
1718 struct hci_dev *hdev = req->hdev;
1719 struct hci_cp_write_page_scan_activity acp;
1720 u8 type;
1721
1722 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1723 return;
1724
1725 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1726 return;
1727
1728 if (enable) {
1729 type = PAGE_SCAN_TYPE_INTERLACED;
1730
1731 /* 160 msec page scan interval */
1732 acp.interval = cpu_to_le16(0x0100);
1733 } else {
1734 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1735
1736 /* default 1.28 sec page scan */
1737 acp.interval = cpu_to_le16(0x0800);
1738 }
1739
1740 acp.window = cpu_to_le16(0x0012);
1741
1742 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1743 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1744 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1745 sizeof(acp), &acp);
1746
1747 if (hdev->page_scan_type != type)
1748 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1749 }
1750
/* HCI request completion handler for Set Connectable: commit the flag
 * changes, reply to the requester and re-sync dependent state.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Turning connectable off clears discoverable too. */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev, NULL);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1799
1800 static int set_connectable_update_settings(struct hci_dev *hdev,
1801 struct sock *sk, u8 val)
1802 {
1803 bool changed = false;
1804 int err;
1805
1806 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1807 changed = true;
1808
1809 if (val) {
1810 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1811 } else {
1812 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1813 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1814 }
1815
1816 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1817 if (err < 0)
1818 return err;
1819
1820 if (changed) {
1821 hci_update_page_scan(hdev, NULL);
1822 hci_update_background_scan(hdev);
1823 return new_settings(hdev, sk);
1824 }
1825
1826 return 0;
1827 }
1828
/* Handle the Set Connectable mgmt command. */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of BR/EDR or LE must be enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored setting needs to change. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA means no HCI commands were queued; fall back
		 * to a plain settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1933
1934 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1935 u16 len)
1936 {
1937 struct mgmt_mode *cp = data;
1938 bool changed;
1939 int err;
1940
1941 BT_DBG("request for %s", hdev->name);
1942
1943 if (cp->val != 0x00 && cp->val != 0x01)
1944 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1945 MGMT_STATUS_INVALID_PARAMS);
1946
1947 hci_dev_lock(hdev);
1948
1949 if (cp->val)
1950 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1951 else
1952 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1953
1954 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1955 if (err < 0)
1956 goto unlock;
1957
1958 if (changed)
1959 err = new_settings(hdev, sk);
1960
1961 unlock:
1962 hci_dev_unlock(hdev);
1963 return err;
1964 }
1965
/* Handle the Set Link Security mgmt command (BR/EDR authentication). */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link level security is a BR/EDR only feature. */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flag needs to change. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches: no HCI command needed. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2035
/* Handle the Set Secure Simple Pairing mgmt command. */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SSP is a BR/EDR feature and needs controller support. */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only update the stored flags. Disabling SSP
	 * also clears High Speed, which depends on SSP.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings. */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also writes debug mode off (cp->val is 0x00
	 * here) when debug keys were in use.
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2117
/* Handle the Set High Speed mgmt command: pure flag toggle; disabling
 * is refused while powered on.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* High Speed depends on BR/EDR, controller SSP support and the
	 * SSP setting being enabled.
	 */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off. */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2168
/* HCI request completion handler for Set LE. */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set LE command. */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	/* Answer all pending Set LE commands, then send one New
	 * Settings event that skips the first responder's socket.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
2208
/* Handle the Set Low Energy mgmt command. */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI work needed when powered off or when the host LE
	 * support already matches; just update the stored flags.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE clears the advertising setting too. */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before disabling LE host support. */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2297
2298 /* This is a helper function to test for pending mgmt commands that can
2299 * cause CoD or EIR HCI commands. We can only allow one such pending
2300 * mgmt command at a time since otherwise we cannot easily track what
2301 * the current values are, will be, and based on that calculate if a new
2302 * HCI command needs to be sent and if yes with what value.
2303 */
2304 static bool pending_eir_or_class(struct hci_dev *hdev)
2305 {
2306 struct pending_cmd *cmd;
2307
2308 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2309 switch (cmd->opcode) {
2310 case MGMT_OP_ADD_UUID:
2311 case MGMT_OP_REMOVE_UUID:
2312 case MGMT_OP_SET_DEV_CLASS:
2313 case MGMT_OP_SET_POWERED:
2314 return true;
2315 }
2316 }
2317
2318 return false;
2319 }
2320
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs occupy bytes 12-15.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2325
2326 static u8 get_uuid_size(const u8 *uuid)
2327 {
2328 u32 val;
2329
2330 if (memcmp(uuid, bluetooth_base_uuid, 12))
2331 return 128;
2332
2333 val = get_unaligned_le32(&uuid[12]);
2334 if (val > 0xffff)
2335 return 32;
2336
2337 return 16;
2338 }
2339
/* Complete a class-of-device affecting command (@mgmt_op) by replying
 * with the current three byte class of device value.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2358
/* HCI request completion handler for Add UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2365
/* Handle the Add UUID mgmt command: register the UUID and refresh the
 * class of device and EIR data on the controller.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may run at a time. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA means no HCI command was needed (the values
		 * were already up to date), so complete immediately.
		 */
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2423
2424 static bool enable_service_cache(struct hci_dev *hdev)
2425 {
2426 if (!hdev_is_powered(hdev))
2427 return false;
2428
2429 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2430 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2431 CACHE_TIMEOUT);
2432 return true;
2433 }
2434
2435 return false;
2436 }
2437
/* HCI request completion callback for the Remove UUID command. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2444
/* Handle the Remove UUID mgmt command. An all-zero UUID is a wildcard
 * that clears every registered UUID; otherwise all entries matching the
 * given UUID are removed. CoD and EIR are then updated to reflect the
 * new UUID list.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* When the service cache gets (re)enabled here, the
		 * actual CoD/EIR update is deferred to the cache flush,
		 * so complete the command right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were queued; complete
		 * the mgmt command immediately.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2522
/* HCI request completion callback for the Set Device Class command. */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2529
/* Handle the Set Device Class mgmt command: validate and store the new
 * major/minor class and, if powered, push it (and possibly EIR) to the
 * controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Class of Device only exists for BR/EDR controllers */
	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and high three bits of major are
	 * format/reserved bits in the CoD encoding and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off, only store the values; they get written to
	 * the controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while waiting for the cache work to
		 * cancel; NOTE(review): presumably the work item itself
		 * takes hci_dev_lock — verify to rule out deadlock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA means no HCI commands were queued */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2600
/* Handle the Load Link Keys mgmt command: replace the whole stored set
 * of BR/EDR link keys with the ones supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that the expected_len
	 * computation below cannot overflow u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The packet length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching any stored state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2682
2683 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2684 u8 addr_type, struct sock *skip_sk)
2685 {
2686 struct mgmt_ev_device_unpaired ev;
2687
2688 bacpy(&ev.addr.bdaddr, bdaddr);
2689 ev.addr.type = addr_type;
2690
2691 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2692 skip_sk);
2693 }
2694
/* Handle the Unpair Device mgmt command: remove all stored keys (link
 * key for BR/EDR, IRK and LTK for LE) for the given address and, when
 * requested, disconnect the remote device. When a disconnect is issued
 * the mgmt command stays pending until the HCI disconnect completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	/* disconnect is a boolean flag in the mgmt API */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		/* Map the mgmt address type to the HCI LE address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No stored key was found for the address */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2805
/* Handle the Disconnect mgmt command: terminate the ACL or LE link to
 * the given address. The command stays pending until the disconnect
 * completes on the HCI level.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in progress at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no established link */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2866
2867 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2868 {
2869 switch (link_type) {
2870 case LE_LINK:
2871 switch (addr_type) {
2872 case ADDR_LE_DEV_PUBLIC:
2873 return BDADDR_LE_PUBLIC;
2874
2875 default:
2876 /* Fallback to LE Random address type */
2877 return BDADDR_LE_RANDOM;
2878 }
2879
2880 default:
2881 /* Fallback to BR/EDR type */
2882 return BDADDR_BREDR;
2883 }
2884 }
2885
2886 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2887 u16 data_len)
2888 {
2889 struct mgmt_rp_get_connections *rp;
2890 struct hci_conn *c;
2891 size_t rp_len;
2892 int err;
2893 u16 i;
2894
2895 BT_DBG("");
2896
2897 hci_dev_lock(hdev);
2898
2899 if (!hdev_is_powered(hdev)) {
2900 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2901 MGMT_STATUS_NOT_POWERED);
2902 goto unlock;
2903 }
2904
2905 i = 0;
2906 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2907 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2908 i++;
2909 }
2910
2911 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2912 rp = kmalloc(rp_len, GFP_KERNEL);
2913 if (!rp) {
2914 err = -ENOMEM;
2915 goto unlock;
2916 }
2917
2918 i = 0;
2919 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2920 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2921 continue;
2922 bacpy(&rp->addr[i].bdaddr, &c->dst);
2923 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2924 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2925 continue;
2926 i++;
2927 }
2928
2929 rp->conn_count = cpu_to_le16(i);
2930
2931 /* Recalculate length in case of filtered SCO connections, etc */
2932 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2933
2934 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2935 rp_len);
2936
2937 kfree(rp);
2938
2939 unlock:
2940 hci_dev_unlock(hdev);
2941 return err;
2942 }
2943
/* Queue an HCI PIN Code Negative Reply and track it as a pending mgmt
 * command so the HCI completion handler can finish it later. The
 * pending entry is added before sending and removed again on failure.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	/* The HCI command payload is just the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2962
/* Handle the PIN Code Reply mgmt command. If high security is required
 * but the PIN is not a full 16 digits, automatically send a negative
 * reply instead and fail the command.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN code */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3022
3023 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3024 u16 len)
3025 {
3026 struct mgmt_cp_set_io_capability *cp = data;
3027
3028 BT_DBG("");
3029
3030 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3031 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3032 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3033
3034 hci_dev_lock(hdev);
3035
3036 hdev->io_capability = cp->io_capability;
3037
3038 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3039 hdev->io_capability);
3040
3041 hci_dev_unlock(hdev);
3042
3043 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3044 0);
3045 }
3046
3047 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3048 {
3049 struct hci_dev *hdev = conn->hdev;
3050 struct pending_cmd *cmd;
3051
3052 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3053 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3054 continue;
3055
3056 if (cmd->user_data != conn)
3057 continue;
3058
3059 return cmd;
3060 }
3061
3062 return NULL;
3063 }
3064
3065 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3066 {
3067 struct mgmt_rp_pair_device rp;
3068 struct hci_conn *conn = cmd->user_data;
3069
3070 bacpy(&rp.addr.bdaddr, &conn->dst);
3071 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3072
3073 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3074 &rp, sizeof(rp));
3075
3076 /* So we don't get further callbacks for this connection */
3077 conn->connect_cfm_cb = NULL;
3078 conn->security_cfm_cb = NULL;
3079 conn->disconn_cfm_cb = NULL;
3080
3081 hci_conn_drop(conn);
3082 hci_conn_put(conn);
3083
3084 mgmt_pending_remove(cmd);
3085
3086 /* The device is paired so there is no need to remove
3087 * its connection parameters anymore.
3088 */
3089 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3090 }
3091
3092 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3093 {
3094 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3095 struct pending_cmd *cmd;
3096
3097 cmd = find_pairing(conn);
3098 if (cmd)
3099 pairing_complete(cmd, status);
3100 }
3101
3102 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3103 {
3104 struct pending_cmd *cmd;
3105
3106 BT_DBG("status %u", status);
3107
3108 cmd = find_pairing(conn);
3109 if (!cmd)
3110 BT_DBG("Unable to find a pending command");
3111 else
3112 pairing_complete(cmd, mgmt_status(status));
3113 }
3114
3115 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3116 {
3117 struct pending_cmd *cmd;
3118
3119 BT_DBG("status %u", status);
3120
3121 if (!status)
3122 return;
3123
3124 cmd = find_pairing(conn);
3125 if (!cmd)
3126 BT_DBG("Unable to find a pending command");
3127 else
3128 pairing_complete(cmd, mgmt_status(status));
3129 }
3130
/* Handle the Pair Device mgmt command: establish an ACL or LE link to
 * the remote device and initiate pairing on it. The command completes
 * from the pairing callbacks once the procedure finishes.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* Callbacks already set means another pairing is in progress */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* The command holds a reference, released in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If already connected and secure enough, finish immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3249
/* Handle the Cancel Pair Device mgmt command: abort an ongoing Pair
 * Device command for the given address with status Cancelled.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The pending pairing must be for the requested address */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3291
/* Common handler for user pairing responses (PIN/confirm/passkey
 * replies and their negative variants). LE responses are routed
 * straight into SMP; BR/EDR responses are forwarded to the controller
 * as the given HCI command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		/* All other replies carry only the remote address */
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3359
/* Handle the PIN Code Negative Reply mgmt command. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3371
/* Handle the User Confirm Reply mgmt command; the payload length is
 * fixed, so anything else is invalid.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3387
/* Handle the User Confirm Negative Reply mgmt command. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3399
/* Handle the User Passkey Reply mgmt command; forwards the passkey. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3411
/* Handle the User Passkey Negative Reply mgmt command. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3423
3424 static void update_name(struct hci_request *req)
3425 {
3426 struct hci_dev *hdev = req->hdev;
3427 struct hci_cp_write_local_name cp;
3428
3429 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3430
3431 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3432 }
3433
/* HCI request completion callback for Set Local Name: completes the
 * pending mgmt command with the name that was requested.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been completed or removed */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3461
/* Handle the Set Local Name mgmt command: store the new name and short
 * name, and if powered update the controller (local name, EIR and LE
 * scan response data).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name is only stored; no HCI command carries it */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off, just store the name and emit the changed
	 * event; the controller is updated on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3530
/* Handle the Read Local OOB Data mgmt command: ask the controller for
 * its OOB pairing data. With Secure Connections enabled the extended
 * variant is used, which returns both P-192 and P-256 values.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB data read may be in progress at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3578
/* Handle the Add Remote OOB Data mgmt command. Two packet sizes are
 * accepted: the legacy form carrying only the P-192 hash/randomizer,
 * and the extended form carrying both P-192 and P-256 values for
 * Secure Connections.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* OOB data is only supported for BR/EDR addresses here */
		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->rand);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192, cp->rand192,
						  cp->hash256, cp->rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3641
/* Remove Remote OOB Data (mgmt command handler).
 *
 * Removes stored Out-of-Band data for one remote BR/EDR address, or
 * clears all stored entries when the wildcard address BDADDR_ANY is
 * given.  A command-complete with the affected address is always sent
 * back to the caller.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	/* OOB data is only stored for BR/EDR addresses. */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Wildcard address: wipe the whole OOB data store. */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	/* A lookup failure means no entry existed for this address,
	 * which is reported as invalid parameters.
	 */
	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
3677
3678 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3679 {
3680 struct pending_cmd *cmd;
3681 u8 type;
3682 int err;
3683
3684 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3685
3686 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3687 if (!cmd)
3688 return -ENOENT;
3689
3690 type = hdev->discovery.type;
3691
3692 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3693 &type, sizeof(type));
3694 mgmt_pending_remove(cmd);
3695
3696 return err;
3697 }
3698
/* HCI request completion callback for Start Discovery.
 *
 * On failure, reports the error back to the pending mgmt command and
 * resets the discovery state.  On success, moves the state machine to
 * FINDING and, for LE-based discovery, arms the delayed work that will
 * disable LE scanning after the discovery timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	/* BR/EDR inquiry terminates on its own, so no scan-disable
	 * timer is needed.
	 */
	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	/* timeout stays 0 for BR/EDR (and the invalid case). */
	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3737
/* Start Discovery (mgmt command handler).
 *
 * Validates the controller state, registers a pending command and
 * builds a single HCI request that starts the requested discovery
 * type: a BR/EDR inquiry, an LE active scan, or both (interleaved).
 * The request completes asynchronously in start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Periodic inquiry and discovery are mutually exclusive. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery run at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY, status,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* An inquiry started outside of mgmt (e.g. via raw HCI)
		 * blocks starting another one.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY,
					   MGMT_STATUS_BUSY, &cp->type,
					   sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Start with a clean cache so stale results are not
		 * reported for the new inquiry.
		 */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY, status,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery needs BR/EDR as well. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY,
					   MGMT_STATUS_NOT_SUPPORTED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_START_DISCOVERY,
						   MGMT_STATUS_REJECTED,
						   &cp->type,
						   sizeof(cp->type));
				mgmt_pending_remove(cmd);
				goto failed;
			}

			disable_advertising(&req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS,
				   &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3910
3911 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3912 {
3913 struct pending_cmd *cmd;
3914 int err;
3915
3916 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3917 if (!cmd)
3918 return -ENOENT;
3919
3920 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3921 &hdev->discovery.type, sizeof(hdev->discovery.type));
3922 mgmt_pending_remove(cmd);
3923
3924 return err;
3925 }
3926
3927 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3928 {
3929 BT_DBG("status %d", status);
3930
3931 hci_dev_lock(hdev);
3932
3933 if (status) {
3934 mgmt_stop_discovery_failed(hdev, status);
3935 goto unlock;
3936 }
3937
3938 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3939
3940 unlock:
3941 hci_dev_unlock(hdev);
3942 }
3943
/* Stop Discovery (mgmt command handler).
 *
 * Validates that a discovery of the requested type is running, then
 * builds and runs an HCI request that stops it.  Completion is
 * normally asynchronous via stop_discovery_complete(); when no HCI
 * commands were necessary the command is completed directly.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery actually
	 * running.
	 */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3999
/* Confirm Name (mgmt command handler).
 *
 * User space answers whether the name of a discovered device is
 * already known.  If known, the inquiry-cache entry no longer needs
 * name resolution and is taken off the resolve list; otherwise it is
 * marked as needing resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense during discovery. */
	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	/* The address must match a cache entry with unknown name
	 * state.
	 */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4041
4042 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4043 u16 len)
4044 {
4045 struct mgmt_cp_block_device *cp = data;
4046 u8 status;
4047 int err;
4048
4049 BT_DBG("%s", hdev->name);
4050
4051 if (!bdaddr_type_is_valid(cp->addr.type))
4052 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4053 MGMT_STATUS_INVALID_PARAMS,
4054 &cp->addr, sizeof(cp->addr));
4055
4056 hci_dev_lock(hdev);
4057
4058 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4059 cp->addr.type);
4060 if (err < 0) {
4061 status = MGMT_STATUS_FAILED;
4062 goto done;
4063 }
4064
4065 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4066 sk);
4067 status = MGMT_STATUS_SUCCESS;
4068
4069 done:
4070 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4071 &cp->addr, sizeof(cp->addr));
4072
4073 hci_dev_unlock(hdev);
4074
4075 return err;
4076 }
4077
4078 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4079 u16 len)
4080 {
4081 struct mgmt_cp_unblock_device *cp = data;
4082 u8 status;
4083 int err;
4084
4085 BT_DBG("%s", hdev->name);
4086
4087 if (!bdaddr_type_is_valid(cp->addr.type))
4088 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4089 MGMT_STATUS_INVALID_PARAMS,
4090 &cp->addr, sizeof(cp->addr));
4091
4092 hci_dev_lock(hdev);
4093
4094 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4095 cp->addr.type);
4096 if (err < 0) {
4097 status = MGMT_STATUS_INVALID_PARAMS;
4098 goto done;
4099 }
4100
4101 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4102 sk);
4103 status = MGMT_STATUS_SUCCESS;
4104
4105 done:
4106 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4107 &cp->addr, sizeof(cp->addr));
4108
4109 hci_dev_unlock(hdev);
4110
4111 return err;
4112 }
4113
/* Set Device ID (mgmt command handler).
 *
 * Stores the Device ID record (source, vendor, product, version) on
 * the controller and refreshes the extended inquiry response so the
 * new record is advertised.  A source value above 0x0002 is rejected;
 * 0x0000 disables the record.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	/* Valid sources are 0x0000 (disabled), 0x0001 (SIG) and
	 * 0x0002 (USB IF).
	 */
	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	/* Push the updated Device ID into the EIR data; the command
	 * has already been answered, so no completion callback.
	 */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
4147
/* HCI request completion callback for Set Advertising.
 *
 * On failure, answers every pending Set Advertising command with the
 * mapped error status.  On success, synchronizes the mgmt-level
 * HCI_ADVERTISING flag with the controller-level HCI_LE_ADV state,
 * answers the pending commands with the new settings, and emits a
 * New Settings event.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the actual controller state into the mgmt flag. */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last responded socket. */
	if (match.sk)
		sock_put(match.sk);
}
4173
/* Set Advertising (mgmt command handler).
 *
 * Enables or disables LE advertising.  When no HCI traffic is needed
 * (powered off, no change, LE connections exist, or an active scan is
 * running) only the HCI_ADVERTISING flag is toggled and user space is
 * answered directly; otherwise an HCI request is issued and completed
 * in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A conflicting Set Advertising or Set LE operation is still
	 * in flight.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4253
4254 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4255 void *data, u16 len)
4256 {
4257 struct mgmt_cp_set_static_address *cp = data;
4258 int err;
4259
4260 BT_DBG("%s", hdev->name);
4261
4262 if (!lmp_le_capable(hdev))
4263 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4264 MGMT_STATUS_NOT_SUPPORTED);
4265
4266 if (hdev_is_powered(hdev))
4267 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4268 MGMT_STATUS_REJECTED);
4269
4270 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4271 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4272 return cmd_status(sk, hdev->id,
4273 MGMT_OP_SET_STATIC_ADDRESS,
4274 MGMT_STATUS_INVALID_PARAMS);
4275
4276 /* Two most significant bits shall be set */
4277 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4278 return cmd_status(sk, hdev->id,
4279 MGMT_OP_SET_STATIC_ADDRESS,
4280 MGMT_STATUS_INVALID_PARAMS);
4281 }
4282
4283 hci_dev_lock(hdev);
4284
4285 bacpy(&hdev->static_addr, &cp->bdaddr);
4286
4287 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4288
4289 hci_dev_unlock(hdev);
4290
4291 return err;
4292 }
4293
/* Set Scan Parameters (mgmt command handler).
 *
 * Stores the LE scan interval and window.  Both values must be in the
 * HCI-defined range 0x0004-0x4000 and the window must not exceed the
 * interval.  If background scanning is active (and no discovery is
 * running) it is restarted so the new parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The scan window is the active portion of each interval. */
	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4349
/* HCI request completion callback for Set Fast Connectable.
 *
 * On failure, answers the pending command with the mapped error
 * status.  On success, updates the HCI_FAST_CONNECTABLE flag to the
 * requested value, answers with the new settings and emits a New
 * Settings event.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored as the pending
		 * command's parameter.
		 */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4382
/* Set Fast Connectable (mgmt command handler).
 *
 * Toggles the fast-connectable page scan mode.  Requires BR/EDR to be
 * enabled, a controller of at least Bluetooth 1.2, a powered adapter
 * and connectable mode.  The actual page scan change runs as an HCI
 * request completed in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested mode: just answer with settings. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4447
/* HCI request completion callback for Set BR/EDR.
 *
 * On failure, rolls back the HCI_BREDR_ENABLED flag (it was set
 * optimistically in set_bredr() before the request ran) and answers
 * with the mapped error.  On success, answers with the new settings
 * and emits a New Settings event.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4479
/* Set BR/EDR (mgmt command handler).
 *
 * Enables or disables BR/EDR support on a dual-mode (BR/EDR + LE)
 * controller.  LE must stay enabled (LE-only operation is configured
 * by disabling BR/EDR, not the other way around).  While powered off
 * only flags are toggled; while powered on, enabling requires HCI
 * commands and disabling is rejected outright.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only dual-mode controllers can toggle BR/EDR. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: answer with current settings. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears every BR/EDR-only
		 * setting.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	hci_update_page_scan(hdev, &req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	/* On failure the optimistically set flag is rolled back in
	 * set_bredr_complete().
	 */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4569
/* Set Secure Connections (mgmt command handler).
 *
 * Accepts three values: 0x00 disables Secure Connections, 0x01
 * enables it, and 0x02 enables SC-only mode.  While powered off only
 * the HCI_SC_ENABLED/HCI_SC_ONLY flags are toggled; while powered on
 * the Write Secure Connections Host Support HCI command is sent.
 * Requires BR/EDR support and either an SC-capable controller or the
 * force-SC debugfs override.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only flags change, no HCI traffic. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state (including SC-only). */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* The SC-only flag is host-side policy and can be set right
	 * away; HCI_SC_ENABLED follows in the command completion.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4657
/* Set Debug Keys (mgmt command handler).
 *
 * Accepts three values: 0x00 discards debug keys, 0x01 keeps them,
 * and 0x02 additionally makes the controller use its SSP debug mode.
 * If the use-debug-keys state changed while powered with SSP enabled,
 * the Write SSP Debug Mode HCI command is sent fire-and-forget.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both keep debug keys in the key store. */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only 0x02 actively enables debug key generation. */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4705
/* Set Privacy (mgmt command handler).
 *
 * Enables or disables LE privacy (resolvable private addresses).
 * Enabling stores the supplied local IRK and marks the current RPA as
 * expired so a fresh one is generated; disabling wipes the IRK.  Only
 * allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA from the new IRK. */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4755
4756 static bool irk_is_valid(struct mgmt_irk_info *irk)
4757 {
4758 switch (irk->addr.type) {
4759 case BDADDR_LE_PUBLIC:
4760 return true;
4761
4762 case BDADDR_LE_RANDOM:
4763 /* Two most significant bits shall be set */
4764 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4765 return false;
4766 return true;
4767 }
4768
4769 return false;
4770 }
4771
/* Load IRKs (mgmt command handler).
 *
 * Replaces the complete set of stored Identity Resolving Keys with
 * the list supplied by user space.  The command length must match the
 * declared key count exactly, and every entry is validated before any
 * existing key is discarded (all-or-nothing semantics).
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on keys that can fit in a u16-sized payload;
	 * guards the expected_len computation against overflow.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count. */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before touching the existing store. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* No RPA is known yet for freshly loaded keys. */
		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4838
4839 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4840 {
4841 if (key->master != 0x00 && key->master != 0x01)
4842 return false;
4843
4844 switch (key->addr.type) {
4845 case BDADDR_LE_PUBLIC:
4846 return true;
4847
4848 case BDADDR_LE_RANDOM:
4849 /* Two most significant bits shall be set */
4850 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4851 return false;
4852 return true;
4853 }
4854
4855 return false;
4856 }
4857
4858 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4859 void *cp_data, u16 len)
4860 {
4861 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4862 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4863 sizeof(struct mgmt_ltk_info));
4864 u16 key_count, expected_len;
4865 int i, err;
4866
4867 BT_DBG("request for %s", hdev->name);
4868
4869 if (!lmp_le_capable(hdev))
4870 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4871 MGMT_STATUS_NOT_SUPPORTED);
4872
4873 key_count = __le16_to_cpu(cp->key_count);
4874 if (key_count > max_key_count) {
4875 BT_ERR("load_ltks: too big key_count value %u", key_count);
4876 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4877 MGMT_STATUS_INVALID_PARAMS);
4878 }
4879
4880 expected_len = sizeof(*cp) + key_count *
4881 sizeof(struct mgmt_ltk_info);
4882 if (expected_len != len) {
4883 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4884 expected_len, len);
4885 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4886 MGMT_STATUS_INVALID_PARAMS);
4887 }
4888
4889 BT_DBG("%s key_count %u", hdev->name, key_count);
4890
4891 for (i = 0; i < key_count; i++) {
4892 struct mgmt_ltk_info *key = &cp->keys[i];
4893
4894 if (!ltk_is_valid(key))
4895 return cmd_status(sk, hdev->id,
4896 MGMT_OP_LOAD_LONG_TERM_KEYS,
4897 MGMT_STATUS_INVALID_PARAMS);
4898 }
4899
4900 hci_dev_lock(hdev);
4901
4902 hci_smp_ltks_clear(hdev);
4903
4904 for (i = 0; i < key_count; i++) {
4905 struct mgmt_ltk_info *key = &cp->keys[i];
4906 u8 type, addr_type, authenticated;
4907
4908 if (key->addr.type == BDADDR_LE_PUBLIC)
4909 addr_type = ADDR_LE_DEV_PUBLIC;
4910 else
4911 addr_type = ADDR_LE_DEV_RANDOM;
4912
4913 switch (key->type) {
4914 case MGMT_LTK_UNAUTHENTICATED:
4915 authenticated = 0x00;
4916 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4917 break;
4918 case MGMT_LTK_AUTHENTICATED:
4919 authenticated = 0x01;
4920 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4921 break;
4922 case MGMT_LTK_P256_UNAUTH:
4923 authenticated = 0x00;
4924 type = SMP_LTK_P256;
4925 break;
4926 case MGMT_LTK_P256_AUTH:
4927 authenticated = 0x01;
4928 type = SMP_LTK_P256;
4929 break;
4930 case MGMT_LTK_P256_DEBUG:
4931 authenticated = 0x00;
4932 type = SMP_LTK_P256_DEBUG;
4933 default:
4934 continue;
4935 }
4936
4937 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4938 authenticated, key->val, key->enc_size, key->ediv,
4939 key->rand);
4940 }
4941
4942 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4943 NULL, 0);
4944
4945 hci_dev_unlock(hdev);
4946
4947 return err;
4948 }
4949
/* Match context handed to get_conn_info_complete() through
 * mgmt_pending_foreach(): identifies which connection's pending
 * Get Conn Info commands should be completed, and with what status.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;
	/* false when TX power could not be read; reply then carries
	 * HCI_TX_POWER_INVALID markers.
	 */
	bool valid_tx_power;
	u8 mgmt_status;
};
4955
/* mgmt_pending_foreach() callback: complete one pending Get Conn Info
 * command.  Commands attached to a different connection than the one
 * in @data are left pending.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	/* On error only the address portion of the reply is meaningful */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Release the hold and reference taken in get_conn_info() when
	 * the command was queued.
	 */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	mgmt_pending_remove(cmd);
}
4992
/* HCI request completion callback for the RSSI/TX power refresh issued
 * by get_conn_info().  Recovers the connection handle from the last
 * sent command and completes the matching pending mgmt commands.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
5050
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * active connection.  Cached values are returned directly when fresh
 * enough; otherwise an HCI request is issued and the reply is
 * deferred to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Error replies still echo the requested address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* RSSI is always read first; conn_info_refresh_complete()
		 * relies on that ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the deferred reply;
		 * released in get_conn_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5162
/* HCI request completion callback for get_clock_info(): replies to the
 * matching pending mgmt command with local (and, for a connection,
 * piconet) clock values.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the last sent command read the piconet clock
	 * of a specific connection; recover that connection.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was registered with the connection (or
	 * NULL) as its user data in get_clock_info().
	 */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with zeroed clock values */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Release the hold and reference taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5217
/* Handle MGMT_OP_GET_CLOCK_INFO: read the local controller clock and,
 * when a peer address is given, the piconet clock of that BR/EDR
 * connection.  The reply is deferred to get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Error replies still echo the requested address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* First read the local clock (which = 0 after memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the deferred reply;
		 * released in get_clock_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5290
5291 static void device_added(struct sock *sk, struct hci_dev *hdev,
5292 bdaddr_t *bdaddr, u8 type, u8 action)
5293 {
5294 struct mgmt_ev_device_added ev;
5295
5296 bacpy(&ev.addr.bdaddr, bdaddr);
5297 ev.addr.type = type;
5298 ev.action = action;
5299
5300 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5301 }
5302
/* Handle MGMT_OP_ADD_DEVICE: add a device to the BR/EDR whitelist or
 * register LE connection parameters with the requested auto-connect
 * action (0x00 background report, 0x01 allow/direct, 0x02 always).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Whitelist changed, so page scanning may need updating */
		hci_update_page_scan(hdev, NULL);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5377
5378 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5379 bdaddr_t *bdaddr, u8 type)
5380 {
5381 struct mgmt_ev_device_removed ev;
5382
5383 bacpy(&ev.addr.bdaddr, bdaddr);
5384 ev.addr.type = type;
5385
5386 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5387 }
5388
/* Handle MGMT_OP_REMOVE_DEVICE: remove one device from the BR/EDR
 * whitelist or the LE connection-parameter list, or — when the
 * address is BDADDR_ANY — flush the whole whitelist and all
 * non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			/* Whitelist changed; page scanning may need updating */
			hci_update_page_scan(hdev, NULL);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were not added via Add Device, so they
		 * may not be removed through this command either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Wildcard removal: flush everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_page_scan(hdev, NULL);

		/* Disabled entries are kept: they were never Add Device'd */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5497
/* Handle MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the supplied list.  Invalid entries are logged and
 * skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeping the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared entry count must match the payload size exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Connection parameters only apply to LE addresses */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5582
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: mark whether the controller's
 * missing configuration is provided externally.  May flip the device
 * between the configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful for controllers flagged as needing external
	 * configuration.
	 */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state now disagrees with the UNCONFIGURED
	 * flag, the device must switch index lists: re-announce it on
	 * the other list (powering on when it became configured).
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5640
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: store a public address to be
 * programmed into the controller via the driver's set_bdaddr hook.
 * Only allowed while powered off; may complete the device's
 * configuration and trigger power-on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver hook there is no way to program the address */
	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	/* Setting the address may have completed configuration: move the
	 * device to the configured index list and power it on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5692
/* Dispatch table for mgmt commands.  The array index IS the opcode, so
 * the entry order must match the MGMT_OP_* numbering exactly.  var_len
 * marks commands whose payload may exceed data_len (the fixed-size
 * prefix); fixed-size commands must match data_len exactly (enforced
 * in mgmt_control()).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
5758
/* Entry point for mgmt commands arriving on an HCI control socket:
 * copies the message from user space, validates the header, index and
 * payload length, then dispatches through mgmt_handlers[].  Returns
 * the number of bytes consumed or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must describe the payload exactly */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still in setup/config or bound to a user
		 * channel are not visible through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices accept only the configuration
		 * related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global commands may not name a controller index... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ...and per-controller commands must name one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands carry at least data_len bytes;
	 * fixed-size commands must match exactly.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5868
5869 void mgmt_index_added(struct hci_dev *hdev)
5870 {
5871 if (hdev->dev_type != HCI_BREDR)
5872 return;
5873
5874 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5875 return;
5876
5877 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5878 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5879 else
5880 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5881 }
5882
5883 void mgmt_index_removed(struct hci_dev *hdev)
5884 {
5885 u8 status = MGMT_STATUS_INVALID_INDEX;
5886
5887 if (hdev->dev_type != HCI_BREDR)
5888 return;
5889
5890 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5891 return;
5892
5893 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5894
5895 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5896 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5897 else
5898 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5899 }
5900
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Rebuild the pending LE connection/report lists from the
	 * stored per-device auto-connect policies after power-on.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			/* Reconnect automatically on next sighting */
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			/* Only report advertising from this device */
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	/* Re-evaluate whether passive scanning is needed now that the
	 * pending lists have been repopulated.
	 */
	hci_update_background_scan(hdev);
}
5927
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(): restarts LE auto-connect actions, answers any
 * pending Set Powered commands and broadcasts New Settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp took a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
5947
/* Build and submit the HCI request that brings a freshly powered-on
 * controller in line with the current mgmt settings (SSP, LE, link
 * security, advertising, BR/EDR scan parameters, class, name, EIR).
 *
 * Returns the hci_req_run() result; 0 means the request was queued and
 * powered_complete() will run when it finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Turn on SSP if requested but not yet enabled on the controller */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the mgmt flag */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		hci_update_page_scan(hdev, &req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6007
/* Notify mgmt about a controller power state change.
 *
 * On power-on the settings are first synced via powered_update_hci();
 * when that request is queued successfully all responses are deferred
 * to powered_complete(). On power-off (or a failed sync) pending
 * commands are answered immediately. Returns 0 or the new_settings()
 * result.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		/* Request could not be queued; respond right away */
		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powered off: answer Set Powered, fail everything else */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Report class of device as cleared if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6042
6043 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6044 {
6045 struct pending_cmd *cmd;
6046 u8 status;
6047
6048 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6049 if (!cmd)
6050 return;
6051
6052 if (err == -ERFKILL)
6053 status = MGMT_STATUS_RFKILLED;
6054 else
6055 status = MGMT_STATUS_FAILED;
6056
6057 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6058
6059 mgmt_pending_remove(cmd);
6060 }
6061
/* Timer callback for the discoverable timeout: drops the discoverable
 * flags, restores plain page scan on BR/EDR, refreshes class and
 * advertising data and broadcasts New Settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Keep page scan but drop inquiry scan */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6092
6093 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6094 bool persistent)
6095 {
6096 struct mgmt_ev_new_link_key ev;
6097
6098 memset(&ev, 0, sizeof(ev));
6099
6100 ev.store_hint = persistent;
6101 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6102 ev.key.addr.type = BDADDR_BREDR;
6103 ev.key.type = key->type;
6104 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6105 ev.key.pin_len = key->pin_len;
6106
6107 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6108 }
6109
6110 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6111 {
6112 switch (ltk->type) {
6113 case SMP_LTK:
6114 case SMP_LTK_SLAVE:
6115 if (ltk->authenticated)
6116 return MGMT_LTK_AUTHENTICATED;
6117 return MGMT_LTK_UNAUTHENTICATED;
6118 case SMP_LTK_P256:
6119 if (ltk->authenticated)
6120 return MGMT_LTK_P256_AUTH;
6121 return MGMT_LTK_P256_UNAUTH;
6122 case SMP_LTK_P256_DEBUG:
6123 return MGMT_LTK_P256_DEBUG;
6124 }
6125
6126 return MGMT_LTK_UNAUTHENTICATED;
6127 }
6128
/* Emit the New Long Term Key mgmt event for a key obtained during SMP
 * pairing, including a hint whether user space should persist it.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an indentity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	/* Key type mapping (incl. P-256 SC types) is done by mgmt_ltk_type */
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* NOTE(review): only plain SMP_LTK sets the master flag —
	 * presumably the master-role key; confirm against smp.h.
	 */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6166
6167 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6168 {
6169 struct mgmt_ev_new_irk ev;
6170
6171 memset(&ev, 0, sizeof(ev));
6172
6173 /* For identity resolving keys from devices that are already
6174 * using a public address or static random address, do not
6175 * ask for storing this key. The identity resolving key really
6176 * is only mandatory for devices using resovlable random
6177 * addresses.
6178 *
6179 * Storing all identity resolving keys has the downside that
6180 * they will be also loaded on next boot of they system. More
6181 * identity resolving keys, means more time during scanning is
6182 * needed to actually resolve these addresses.
6183 */
6184 if (bacmp(&irk->rpa, BDADDR_ANY))
6185 ev.store_hint = 0x01;
6186 else
6187 ev.store_hint = 0x00;
6188
6189 bacpy(&ev.rpa, &irk->rpa);
6190 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6191 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6192 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6193
6194 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6195 }
6196
6197 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6198 bool persistent)
6199 {
6200 struct mgmt_ev_new_csrk ev;
6201
6202 memset(&ev, 0, sizeof(ev));
6203
6204 /* Devices using resolvable or non-resolvable random addresses
6205 * without providing an indentity resolving key don't require
6206 * to store signature resolving keys. Their addresses will change
6207 * the next time around.
6208 *
6209 * Only when a remote device provides an identity address
6210 * make sure the signature resolving key is stored. So allow
6211 * static random and public addresses here.
6212 */
6213 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6214 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6215 ev.store_hint = 0x00;
6216 else
6217 ev.store_hint = persistent;
6218
6219 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6220 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6221 ev.key.master = csrk->master;
6222 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6223
6224 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6225 }
6226
6227 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6228 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6229 u16 max_interval, u16 latency, u16 timeout)
6230 {
6231 struct mgmt_ev_new_conn_param ev;
6232
6233 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6234 return;
6235
6236 memset(&ev, 0, sizeof(ev));
6237 bacpy(&ev.addr.bdaddr, bdaddr);
6238 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6239 ev.store_hint = store_hint;
6240 ev.min_interval = cpu_to_le16(min_interval);
6241 ev.max_interval = cpu_to_le16(max_interval);
6242 ev.latency = cpu_to_le16(latency);
6243 ev.timeout = cpu_to_le16(timeout);
6244
6245 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6246 }
6247
6248 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6249 u8 data_len)
6250 {
6251 eir[eir_len++] = sizeof(type) + data_len;
6252 eir[eir_len++] = type;
6253 memcpy(&eir[eir_len], data, data_len);
6254 eir_len += data_len;
6255
6256 return eir_len;
6257 }
6258
/* Emit the Device Connected mgmt event. The attached EIR data is
 * either the remote's LE advertising data (when present) or, for
 * BR/EDR, the remote name and class of device — never a mix.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Append class of device unless it is all zeros */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	/* Only the filled portion of buf is sent */
	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6295
6296 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6297 {
6298 struct mgmt_cp_disconnect *cp = cmd->param;
6299 struct sock **sk = data;
6300 struct mgmt_rp_disconnect rp;
6301
6302 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6303 rp.addr.type = cp->addr.type;
6304
6305 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6306 sizeof(rp));
6307
6308 *sk = cmd->sk;
6309 sock_hold(*sk);
6310
6311 mgmt_pending_remove(cmd);
6312 }
6313
6314 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6315 {
6316 struct hci_dev *hdev = data;
6317 struct mgmt_cp_unpair_device *cp = cmd->param;
6318 struct mgmt_rp_unpair_device rp;
6319
6320 memset(&rp, 0, sizeof(rp));
6321 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6322 rp.addr.type = cp->addr.type;
6323
6324 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6325
6326 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6327
6328 mgmt_pending_remove(cmd);
6329 }
6330
6331 bool mgmt_powering_down(struct hci_dev *hdev)
6332 {
6333 struct pending_cmd *cmd;
6334 struct mgmt_mode *cp;
6335
6336 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6337 if (!cmd)
6338 return false;
6339
6340 cp = cmd->param;
6341 if (!cp->val)
6342 return true;
6343
6344 return false;
6345 }
6346
/* Emit the Device Disconnected mgmt event and complete any pending
 * Disconnect and Unpair Device commands. Also expedites a pending
 * power-off once the last connection drops.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections mgmt knows about produce events */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp leaves a referenced socket in sk so the
	 * event below can skip the command's own socket.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6382
6383 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6384 u8 link_type, u8 addr_type, u8 status)
6385 {
6386 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6387 struct mgmt_cp_disconnect *cp;
6388 struct mgmt_rp_disconnect rp;
6389 struct pending_cmd *cmd;
6390
6391 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6392 hdev);
6393
6394 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6395 if (!cmd)
6396 return;
6397
6398 cp = cmd->param;
6399
6400 if (bacmp(bdaddr, &cp->addr.bdaddr))
6401 return;
6402
6403 if (cp->addr.type != bdaddr_type)
6404 return;
6405
6406 bacpy(&rp.addr.bdaddr, bdaddr);
6407 rp.addr.type = bdaddr_type;
6408
6409 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6410 mgmt_status(status), &rp, sizeof(rp));
6411
6412 mgmt_pending_remove(cmd);
6413 }
6414
6415 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6416 u8 addr_type, u8 status)
6417 {
6418 struct mgmt_ev_connect_failed ev;
6419
6420 /* The connection is still in hci_conn_hash so test for 1
6421 * instead of 0 to know if this is the last one.
6422 */
6423 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6424 cancel_delayed_work(&hdev->power_off);
6425 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6426 }
6427
6428 bacpy(&ev.addr.bdaddr, bdaddr);
6429 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6430 ev.status = mgmt_status(status);
6431
6432 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6433 }
6434
6435 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6436 {
6437 struct mgmt_ev_pin_code_request ev;
6438
6439 bacpy(&ev.addr.bdaddr, bdaddr);
6440 ev.addr.type = BDADDR_BREDR;
6441 ev.secure = secure;
6442
6443 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6444 }
6445
6446 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6447 u8 status)
6448 {
6449 struct pending_cmd *cmd;
6450 struct mgmt_rp_pin_code_reply rp;
6451
6452 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6453 if (!cmd)
6454 return;
6455
6456 bacpy(&rp.addr.bdaddr, bdaddr);
6457 rp.addr.type = BDADDR_BREDR;
6458
6459 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6460 mgmt_status(status), &rp, sizeof(rp));
6461
6462 mgmt_pending_remove(cmd);
6463 }
6464
6465 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6466 u8 status)
6467 {
6468 struct pending_cmd *cmd;
6469 struct mgmt_rp_pin_code_reply rp;
6470
6471 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6472 if (!cmd)
6473 return;
6474
6475 bacpy(&rp.addr.bdaddr, bdaddr);
6476 rp.addr.type = BDADDR_BREDR;
6477
6478 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6479 mgmt_status(status), &rp, sizeof(rp));
6480
6481 mgmt_pending_remove(cmd);
6482 }
6483
6484 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6485 u8 link_type, u8 addr_type, u32 value,
6486 u8 confirm_hint)
6487 {
6488 struct mgmt_ev_user_confirm_request ev;
6489
6490 BT_DBG("%s", hdev->name);
6491
6492 bacpy(&ev.addr.bdaddr, bdaddr);
6493 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6494 ev.confirm_hint = confirm_hint;
6495 ev.value = cpu_to_le32(value);
6496
6497 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6498 NULL);
6499 }
6500
6501 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6502 u8 link_type, u8 addr_type)
6503 {
6504 struct mgmt_ev_user_passkey_request ev;
6505
6506 BT_DBG("%s", hdev->name);
6507
6508 bacpy(&ev.addr.bdaddr, bdaddr);
6509 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6510
6511 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6512 NULL);
6513 }
6514
6515 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6516 u8 link_type, u8 addr_type, u8 status,
6517 u8 opcode)
6518 {
6519 struct pending_cmd *cmd;
6520 struct mgmt_rp_user_confirm_reply rp;
6521 int err;
6522
6523 cmd = mgmt_pending_find(opcode, hdev);
6524 if (!cmd)
6525 return -ENOENT;
6526
6527 bacpy(&rp.addr.bdaddr, bdaddr);
6528 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6529 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6530 &rp, sizeof(rp));
6531
6532 mgmt_pending_remove(cmd);
6533
6534 return err;
6535 }
6536
/* Complete a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6543
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6551
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6558
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6566
6567 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6568 u8 link_type, u8 addr_type, u32 passkey,
6569 u8 entered)
6570 {
6571 struct mgmt_ev_passkey_notify ev;
6572
6573 BT_DBG("%s", hdev->name);
6574
6575 bacpy(&ev.addr.bdaddr, bdaddr);
6576 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6577 ev.passkey = __cpu_to_le32(passkey);
6578 ev.entered = entered;
6579
6580 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6581 }
6582
/* Emit the Auth Failed mgmt event and, if a Pair Device command is
 * pending for this connection, complete it with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the socket that issued the pairing; it gets the command
	 * response via pairing_complete() instead.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd)
		pairing_complete(cmd, status);
}
6601
/* Completion handler for Write Auth Enable: syncs the mgmt link
 * security flag with the controller state, answers pending Set Link
 * Security commands and emits New Settings when the flag changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test_and_* tells us whether the flag actually flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
6630
6631 static void clear_eir(struct hci_request *req)
6632 {
6633 struct hci_dev *hdev = req->hdev;
6634 struct hci_cp_write_eir cp;
6635
6636 if (!lmp_ext_inq_capable(hdev))
6637 return;
6638
6639 memset(hdev->eir, 0, sizeof(hdev->eir));
6640
6641 memset(&cp, 0, sizeof(cp));
6642
6643 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6644 }
6645
/* Completion handler for Write SSP Mode: syncs the SSP (and dependent
 * High Speed) flags, answers pending Set SSP commands, emits New
 * Settings on change and updates or clears the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set
		 * flags and let everyone know via New Settings.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables High Speed; either flag
		 * clearing counts as a settings change.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		/* Keep the debug-keys mode in sync when it is in use */
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6698
/* Completion handler for enabling/disabling Secure Connections: syncs
 * the SC (and SC-only) flags, answers pending Set Secure Connections
 * commands and emits New Settings when something changed.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flags that were set
		 * optimistically before the HCI command was issued.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SC implies disabling SC-only mode too */
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6735
6736 static void sk_lookup(struct pending_cmd *cmd, void *data)
6737 {
6738 struct cmd_lookup *match = data;
6739
6740 if (match->sk == NULL) {
6741 match->sk = cmd->sk;
6742 sock_hold(match->sk);
6743 }
6744 }
6745
6746 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6747 u8 status)
6748 {
6749 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6750
6751 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6752 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6753 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6754
6755 if (!status)
6756 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6757 NULL);
6758
6759 if (match.sk)
6760 sock_put(match.sk);
6761 }
6762
/* Completion handler for a local name change: updates the cached name
 * and emits the Local Name Changed event, except during power-on where
 * no signal should be sent.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No Set Local Name pending: the change originated in
		 * the kernel, so update the cached name here.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the socket that issued the name change, if any */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6789
/* Completion handler for Read Local OOB Data: responds to the pending
 * command with either the extended (P-192 + P-256, when Secure
 * Connections is enabled and 256-bit data is available) or the plain
 * P-192 OOB data.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && rand256) {
			/* Extended reply with both key variants */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.rand192, rand192, sizeof(rp.rand192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy reply with only the P-192 data */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.rand, rand192, sizeof(rp.rand));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6833
/* Emit the Device Found mgmt event for an inquiry/scan result,
 * combining EIR data, an optional class-of-device field and any scan
 * response data into a single EIR blob.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only if the EIR data doesn't
	 * already contain one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6881
6882 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6883 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6884 {
6885 struct mgmt_ev_device_found *ev;
6886 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6887 u16 eir_len;
6888
6889 ev = (struct mgmt_ev_device_found *) buf;
6890
6891 memset(buf, 0, sizeof(buf));
6892
6893 bacpy(&ev->addr.bdaddr, bdaddr);
6894 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6895 ev->rssi = rssi;
6896
6897 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6898 name_len);
6899
6900 ev->eir_len = cpu_to_le16(eir_len);
6901
6902 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6903 }
6904
/* Report a discovery state change: completes a pending Start/Stop
 * Discovery command (if any) and emits the Discovering event.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	/* The state change completes whichever command drove it */
	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		/* The reply carries the discovery type back */
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
6931
/* Request-complete callback for re-enabling advertising; only logs
 * the resulting status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
6936
/* Re-arm advertising after the controller implicitly stopped it, but
 * only while the HCI_ADVERTISING setting is still enabled.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}