Bluetooth: Fix HCI command send functions to use const specifier
net/bluetooth/mgmt.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)
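The change named in the title lands in the HCI send helpers declared elsewhere (include/net/bluetooth/hci_core.h); mgmt.c is one of their callers. As a rough sketch, assuming the usual prototypes, the const qualifier is added to the parameter buffer so callers in this file can pass pointers to read-only data:

/* Sketch only: assumed prototypes of the const-qualified send helpers */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param);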
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
34
35 bool enable_hs;
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
39
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
42 MGMT_OP_READ_INFO,
43 MGMT_OP_SET_POWERED,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_PAIRABLE,
48 MGMT_OP_SET_LINK_SECURITY,
49 MGMT_OP_SET_SSP,
50 MGMT_OP_SET_HS,
51 MGMT_OP_SET_LE,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
54 MGMT_OP_ADD_UUID,
55 MGMT_OP_REMOVE_UUID,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_DISCONNECT,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_PAIR_DEVICE,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
75 MGMT_OP_CONFIRM_NAME,
76 MGMT_OP_BLOCK_DEVICE,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 };
80
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
83 MGMT_EV_INDEX_ADDED,
84 MGMT_EV_INDEX_REMOVED,
85 MGMT_EV_NEW_SETTINGS,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
88 MGMT_EV_NEW_LINK_KEY,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
96 MGMT_EV_AUTH_FAILED,
97 MGMT_EV_DEVICE_FOUND,
98 MGMT_EV_DISCOVERING,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
103 };
104
105 /*
106  * These LE scan and inquiry parameters were chosen according to the LE
107  * General Discovery Procedure specification.
108 */
109 #define LE_SCAN_WIN 0x12
110 #define LE_SCAN_INT 0x12
111 #define LE_SCAN_TIMEOUT_LE_ONLY msecs_to_jiffies(10240)
112 #define LE_SCAN_TIMEOUT_BREDR_LE msecs_to_jiffies(5120)
113
114 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
115 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
116
117 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
118
119 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
120 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
121
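/* A mgmt command that has been accepted but not yet answered: the
 * originating socket, opcode and a copy of the parameters are kept so
 * that the matching cmd_complete/cmd_status can be sent once the HCI
 * side of the operation finishes.
 */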
122 struct pending_cmd {
123 struct list_head list;
124 u16 opcode;
125 int index;
126 void *param;
127 struct sock *sk;
128 void *user_data;
129 };
130
131 /* HCI to MGMT error code conversion table */
132 static u8 mgmt_status_table[] = {
133 MGMT_STATUS_SUCCESS,
134 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
135 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
136 MGMT_STATUS_FAILED, /* Hardware Failure */
137 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
138 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
139 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
140 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
141 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
142 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
144 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
145 MGMT_STATUS_BUSY, /* Command Disallowed */
146 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
147 MGMT_STATUS_REJECTED, /* Rejected Security */
148 MGMT_STATUS_REJECTED, /* Rejected Personal */
149 MGMT_STATUS_TIMEOUT, /* Host Timeout */
150 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
151 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
152 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
153 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
154 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
155 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
156 MGMT_STATUS_BUSY, /* Repeated Attempts */
157 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
158 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
159 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
160 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
161 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
162 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
163 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
164 MGMT_STATUS_FAILED, /* Unspecified Error */
165 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
166 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
167 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
168 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
169 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
170 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
171 MGMT_STATUS_FAILED, /* Unit Link Key Used */
172 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
173 MGMT_STATUS_TIMEOUT, /* Instant Passed */
174 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
175 MGMT_STATUS_FAILED, /* Transaction Collision */
176 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
177 MGMT_STATUS_REJECTED, /* QoS Rejected */
178 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
179 MGMT_STATUS_REJECTED, /* Insufficient Security */
180 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
181 MGMT_STATUS_BUSY, /* Role Switch Pending */
182 MGMT_STATUS_FAILED, /* Slot Violation */
183 MGMT_STATUS_FAILED, /* Role Switch Failed */
184 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
185 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
186 MGMT_STATUS_BUSY, /* Host Busy Pairing */
187 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
188 MGMT_STATUS_BUSY, /* Controller Busy */
189 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
190 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
191 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
192 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
193 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
194 };
195
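/* The management interface only exposes BR/EDR (primary) controllers;
 * other controller types such as AMP are filtered out.
 */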
196 bool mgmt_valid_hdev(struct hci_dev *hdev)
197 {
198 return hdev->dev_type == HCI_BREDR;
199 }
200
201 static u8 mgmt_status(u8 hci_status)
202 {
203 if (hci_status < ARRAY_SIZE(mgmt_status_table))
204 return mgmt_status_table[hci_status];
205
206 return MGMT_STATUS_FAILED;
207 }
208
209 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
210 {
211 struct sk_buff *skb;
212 struct mgmt_hdr *hdr;
213 struct mgmt_ev_cmd_status *ev;
214 int err;
215
216 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
217
218 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
219 if (!skb)
220 return -ENOMEM;
221
222 hdr = (void *) skb_put(skb, sizeof(*hdr));
223
224 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
225 hdr->index = cpu_to_le16(index);
226 hdr->len = cpu_to_le16(sizeof(*ev));
227
228 ev = (void *) skb_put(skb, sizeof(*ev));
229 ev->status = status;
230 ev->opcode = cpu_to_le16(cmd);
231
232 err = sock_queue_rcv_skb(sk, skb);
233 if (err < 0)
234 kfree_skb(skb);
235
236 return err;
237 }
238
239 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
240 void *rp, size_t rp_len)
241 {
242 struct sk_buff *skb;
243 struct mgmt_hdr *hdr;
244 struct mgmt_ev_cmd_complete *ev;
245 int err;
246
247 BT_DBG("sock %p", sk);
248
249 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
250 if (!skb)
251 return -ENOMEM;
252
253 hdr = (void *) skb_put(skb, sizeof(*hdr));
254
255 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
256 hdr->index = cpu_to_le16(index);
257 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
258
259 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
260 ev->opcode = cpu_to_le16(cmd);
261 ev->status = status;
262
263 if (rp)
264 memcpy(ev->data, rp, rp_len);
265
266 err = sock_queue_rcv_skb(sk, skb);
267 if (err < 0)
268 kfree_skb(skb);
269
270 return err;
271 }
272
273 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 u16 data_len)
275 {
276 struct mgmt_rp_read_version rp;
277
278 BT_DBG("sock %p", sk);
279
280 rp.version = MGMT_VERSION;
281 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
282
283 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
284 sizeof(rp));
285 }
286
287 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 u16 data_len)
289 {
290 struct mgmt_rp_read_commands *rp;
291 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
292 const u16 num_events = ARRAY_SIZE(mgmt_events);
293 __le16 *opcode;
294 size_t rp_size;
295 int i, err;
296
297 BT_DBG("sock %p", sk);
298
299 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
300
301 rp = kmalloc(rp_size, GFP_KERNEL);
302 if (!rp)
303 return -ENOMEM;
304
305 rp->num_commands = __constant_cpu_to_le16(num_commands);
306 rp->num_events = __constant_cpu_to_le16(num_events);
307
308 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
309 put_unaligned_le16(mgmt_commands[i], opcode);
310
311 for (i = 0; i < num_events; i++, opcode++)
312 put_unaligned_le16(mgmt_events[i], opcode);
313
314 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
315 rp_size);
316 kfree(rp);
317
318 return err;
319 }
320
321 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
322 u16 data_len)
323 {
324 struct mgmt_rp_read_index_list *rp;
325 struct hci_dev *d;
326 size_t rp_len;
327 u16 count;
328 int err;
329
330 BT_DBG("sock %p", sk);
331
332 read_lock(&hci_dev_list_lock);
333
334 count = 0;
335 list_for_each_entry(d, &hci_dev_list, list) {
336 if (!mgmt_valid_hdev(d))
337 continue;
338
339 count++;
340 }
341
342 rp_len = sizeof(*rp) + (2 * count);
343 rp = kmalloc(rp_len, GFP_ATOMIC);
344 if (!rp) {
345 read_unlock(&hci_dev_list_lock);
346 return -ENOMEM;
347 }
348
349 count = 0;
350 list_for_each_entry(d, &hci_dev_list, list) {
351 if (test_bit(HCI_SETUP, &d->dev_flags))
352 continue;
353
354 if (!mgmt_valid_hdev(d))
355 continue;
356
357 rp->index[count++] = cpu_to_le16(d->id);
358 BT_DBG("Added hci%u", d->id);
359 }
360
361 rp->num_controllers = cpu_to_le16(count);
362 rp_len = sizeof(*rp) + (2 * count);
363
364 read_unlock(&hci_dev_list_lock);
365
366 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
367 rp_len);
368
369 kfree(rp);
370
371 return err;
372 }
373
374 static u32 get_supported_settings(struct hci_dev *hdev)
375 {
376 u32 settings = 0;
377
378 settings |= MGMT_SETTING_POWERED;
379 settings |= MGMT_SETTING_PAIRABLE;
380
381 if (lmp_ssp_capable(hdev))
382 settings |= MGMT_SETTING_SSP;
383
384 if (lmp_bredr_capable(hdev)) {
385 settings |= MGMT_SETTING_CONNECTABLE;
386 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
391 }
392
393 if (enable_hs)
394 settings |= MGMT_SETTING_HS;
395
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
398
399 return settings;
400 }
401
402 static u32 get_current_settings(struct hci_dev *hdev)
403 {
404 u32 settings = 0;
405
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
408
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
411
412 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_FAST_CONNECTABLE;
414
415 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_DISCOVERABLE;
417
418 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
419 settings |= MGMT_SETTING_PAIRABLE;
420
421 if (lmp_bredr_capable(hdev))
422 settings |= MGMT_SETTING_BREDR;
423
424 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LE;
426
427 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
428 settings |= MGMT_SETTING_LINK_SECURITY;
429
430 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_SSP;
432
433 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
434 settings |= MGMT_SETTING_HS;
435
436 return settings;
437 }
438
439 #define PNP_INFO_SVCLASS_ID 0x1200
440
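/* Append the 16-bit service UUID list to the EIR buffer, skipping the
 * PnP Information service and any value below 0x1100; the field is
 * downgraded to EIR_UUID16_SOME when not all UUIDs fit within len.
 */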
441 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
442 {
443 u8 *ptr = data, *uuids_start = NULL;
444 struct bt_uuid *uuid;
445
446 if (len < 4)
447 return ptr;
448
449 list_for_each_entry(uuid, &hdev->uuids, list) {
450 u16 uuid16;
451
452 if (uuid->size != 16)
453 continue;
454
455 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
456 if (uuid16 < 0x1100)
457 continue;
458
459 if (uuid16 == PNP_INFO_SVCLASS_ID)
460 continue;
461
462 if (!uuids_start) {
463 uuids_start = ptr;
464 uuids_start[0] = 1;
465 uuids_start[1] = EIR_UUID16_ALL;
466 ptr += 2;
467 }
468
469 /* Stop if not enough space to put next UUID */
470 if ((ptr - data) + sizeof(u16) > len) {
471 uuids_start[1] = EIR_UUID16_SOME;
472 break;
473 }
474
475 *ptr++ = (uuid16 & 0x00ff);
476 *ptr++ = (uuid16 & 0xff00) >> 8;
477 uuids_start[0] += sizeof(uuid16);
478 }
479
480 return ptr;
481 }
482
483 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
484 {
485 u8 *ptr = data, *uuids_start = NULL;
486 struct bt_uuid *uuid;
487
488 if (len < 6)
489 return ptr;
490
491 list_for_each_entry(uuid, &hdev->uuids, list) {
492 if (uuid->size != 32)
493 continue;
494
495 if (!uuids_start) {
496 uuids_start = ptr;
497 uuids_start[0] = 1;
498 uuids_start[1] = EIR_UUID32_ALL;
499 ptr += 2;
500 }
501
502 /* Stop if not enough space to put next UUID */
503 if ((ptr - data) + sizeof(u32) > len) {
504 uuids_start[1] = EIR_UUID32_SOME;
505 break;
506 }
507
508 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
509 ptr += sizeof(u32);
510 uuids_start[0] += sizeof(u32);
511 }
512
513 return ptr;
514 }
515
516 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
517 {
518 u8 *ptr = data, *uuids_start = NULL;
519 struct bt_uuid *uuid;
520
521 if (len < 18)
522 return ptr;
523
524 list_for_each_entry(uuid, &hdev->uuids, list) {
525 if (uuid->size != 128)
526 continue;
527
528 if (!uuids_start) {
529 uuids_start = ptr;
530 uuids_start[0] = 1;
531 uuids_start[1] = EIR_UUID128_ALL;
532 ptr += 2;
533 }
534
535 /* Stop if not enough space to put next UUID */
536 if ((ptr - data) + 16 > len) {
537 uuids_start[1] = EIR_UUID128_SOME;
538 break;
539 }
540
541 memcpy(ptr, uuid->uuid, 16);
542 ptr += 16;
543 uuids_start[0] += 16;
544 }
545
546 return ptr;
547 }
548
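/* Build the extended inquiry response data: local name (shortened if
 * longer than 48 bytes), inquiry TX power, Device ID record and the
 * 16-/32-/128-bit service UUID lists, each truncated to the space left
 * within HCI_MAX_EIR_LENGTH.
 */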
549 static void create_eir(struct hci_dev *hdev, u8 *data)
550 {
551 u8 *ptr = data;
552 size_t name_len;
553
554 name_len = strlen(hdev->dev_name);
555
556 if (name_len > 0) {
557 /* EIR Data type */
558 if (name_len > 48) {
559 name_len = 48;
560 ptr[1] = EIR_NAME_SHORT;
561 } else
562 ptr[1] = EIR_NAME_COMPLETE;
563
564 /* EIR Data length */
565 ptr[0] = name_len + 1;
566
567 memcpy(ptr + 2, hdev->dev_name, name_len);
568
569 ptr += (name_len + 2);
570 }
571
572 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
573 ptr[0] = 2;
574 ptr[1] = EIR_TX_POWER;
575 ptr[2] = (u8) hdev->inq_tx_power;
576
577 ptr += 3;
578 }
579
580 if (hdev->devid_source > 0) {
581 ptr[0] = 9;
582 ptr[1] = EIR_DEVICE_ID;
583
584 put_unaligned_le16(hdev->devid_source, ptr + 2);
585 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
586 put_unaligned_le16(hdev->devid_product, ptr + 6);
587 put_unaligned_le16(hdev->devid_version, ptr + 8);
588
589 ptr += 10;
590 }
591
592 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
593 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
594 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
595 }
596
597 static void update_eir(struct hci_request *req)
598 {
599 struct hci_dev *hdev = req->hdev;
600 struct hci_cp_write_eir cp;
601
602 if (!hdev_is_powered(hdev))
603 return;
604
605 if (!lmp_ext_inq_capable(hdev))
606 return;
607
608 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
609 return;
610
611 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
612 return;
613
614 memset(&cp, 0, sizeof(cp));
615
616 create_eir(hdev, cp.data);
617
618 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
619 return;
620
621 memcpy(hdev->eir, cp.data, sizeof(cp.data));
622
623 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
624 }
625
626 static u8 get_service_classes(struct hci_dev *hdev)
627 {
628 struct bt_uuid *uuid;
629 u8 val = 0;
630
631 list_for_each_entry(uuid, &hdev->uuids, list)
632 val |= uuid->svc_hint;
633
634 return val;
635 }
636
637 static void update_class(struct hci_request *req)
638 {
639 struct hci_dev *hdev = req->hdev;
640 u8 cod[3];
641
642 BT_DBG("%s", hdev->name);
643
644 if (!hdev_is_powered(hdev))
645 return;
646
647 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
648 return;
649
650 cod[0] = hdev->minor_class;
651 cod[1] = hdev->major_class;
652 cod[2] = get_service_classes(hdev);
653
654 if (memcmp(cod, hdev->dev_class, 3) == 0)
655 return;
656
657 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
658 }
659
660 static void service_cache_off(struct work_struct *work)
661 {
662 struct hci_dev *hdev = container_of(work, struct hci_dev,
663 service_cache.work);
664 struct hci_request req;
665
666 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
667 return;
668
669 hci_req_init(&req, hdev);
670
671 hci_dev_lock(hdev);
672
673 update_eir(&req);
674 update_class(&req);
675
676 hci_dev_unlock(hdev);
677
678 hci_req_run(&req, NULL);
679 }
680
681 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
682 {
683 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
684 return;
685
686 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
687
688 	/* Non-mgmt controlled devices get this bit set
689 	 * implicitly so that pairing works for them; for
690 	 * mgmt, however, we require user-space to enable
691 	 * it explicitly.
692 	 */
693 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
694 }
695
696 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
697 void *data, u16 data_len)
698 {
699 struct mgmt_rp_read_info rp;
700
701 BT_DBG("sock %p %s", sk, hdev->name);
702
703 hci_dev_lock(hdev);
704
705 memset(&rp, 0, sizeof(rp));
706
707 bacpy(&rp.bdaddr, &hdev->bdaddr);
708
709 rp.version = hdev->hci_ver;
710 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
711
712 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
713 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
714
715 memcpy(rp.dev_class, hdev->dev_class, 3);
716
717 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
718 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
719
720 hci_dev_unlock(hdev);
721
722 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
723 sizeof(rp));
724 }
725
726 static void mgmt_pending_free(struct pending_cmd *cmd)
727 {
728 sock_put(cmd->sk);
729 kfree(cmd->param);
730 kfree(cmd);
731 }
732
733 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
734 struct hci_dev *hdev, void *data,
735 u16 len)
736 {
737 struct pending_cmd *cmd;
738
739 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
740 if (!cmd)
741 return NULL;
742
743 cmd->opcode = opcode;
744 cmd->index = hdev->id;
745
746 cmd->param = kmalloc(len, GFP_KERNEL);
747 if (!cmd->param) {
748 kfree(cmd);
749 return NULL;
750 }
751
752 if (data)
753 memcpy(cmd->param, data, len);
754
755 cmd->sk = sk;
756 sock_hold(sk);
757
758 list_add(&cmd->list, &hdev->mgmt_pending);
759
760 return cmd;
761 }
762
763 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
764 void (*cb)(struct pending_cmd *cmd,
765 void *data),
766 void *data)
767 {
768 struct pending_cmd *cmd, *tmp;
769
770 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
771 if (opcode > 0 && cmd->opcode != opcode)
772 continue;
773
774 cb(cmd, data);
775 }
776 }
777
778 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
779 {
780 struct pending_cmd *cmd;
781
782 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
783 if (cmd->opcode == opcode)
784 return cmd;
785 }
786
787 return NULL;
788 }
789
790 static void mgmt_pending_remove(struct pending_cmd *cmd)
791 {
792 list_del(&cmd->list);
793 mgmt_pending_free(cmd);
794 }
795
796 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
797 {
798 __le32 settings = cpu_to_le32(get_current_settings(hdev));
799
800 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
801 sizeof(settings));
802 }
803
804 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
805 u16 len)
806 {
807 struct mgmt_mode *cp = data;
808 struct pending_cmd *cmd;
809 int err;
810
811 BT_DBG("request for %s", hdev->name);
812
813 if (cp->val != 0x00 && cp->val != 0x01)
814 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
815 MGMT_STATUS_INVALID_PARAMS);
816
817 hci_dev_lock(hdev);
818
819 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
820 cancel_delayed_work(&hdev->power_off);
821
822 if (cp->val) {
823 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
824 data, len);
825 err = mgmt_powered(hdev, 1);
826 goto failed;
827 }
828 }
829
830 if (!!cp->val == hdev_is_powered(hdev)) {
831 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
832 goto failed;
833 }
834
835 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
836 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
837 MGMT_STATUS_BUSY);
838 goto failed;
839 }
840
841 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
842 if (!cmd) {
843 err = -ENOMEM;
844 goto failed;
845 }
846
847 if (cp->val)
848 queue_work(hdev->req_workqueue, &hdev->power_on);
849 else
850 queue_work(hdev->req_workqueue, &hdev->power_off.work);
851
852 err = 0;
853
854 failed:
855 hci_dev_unlock(hdev);
856 return err;
857 }
858
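/* Send a mgmt event to every open control socket except skip_sk, which
 * is normally the socket whose command triggered the change.
 */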
859 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
860 struct sock *skip_sk)
861 {
862 struct sk_buff *skb;
863 struct mgmt_hdr *hdr;
864
865 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
866 if (!skb)
867 return -ENOMEM;
868
869 hdr = (void *) skb_put(skb, sizeof(*hdr));
870 hdr->opcode = cpu_to_le16(event);
871 if (hdev)
872 hdr->index = cpu_to_le16(hdev->id);
873 else
874 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
875 hdr->len = cpu_to_le16(data_len);
876
877 if (data)
878 memcpy(skb_put(skb, data_len), data, data_len);
879
880 /* Time stamp */
881 __net_timestamp(skb);
882
883 hci_send_to_control(skb, skip_sk);
884 kfree_skb(skb);
885
886 return 0;
887 }
888
889 static int new_settings(struct hci_dev *hdev, struct sock *skip)
890 {
891 __le32 ev;
892
893 ev = cpu_to_le32(get_current_settings(hdev));
894
895 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
896 }
897
898 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
899 u16 len)
900 {
901 struct mgmt_cp_set_discoverable *cp = data;
902 struct pending_cmd *cmd;
903 u16 timeout;
904 u8 scan;
905 int err;
906
907 BT_DBG("request for %s", hdev->name);
908
909 if (!lmp_bredr_capable(hdev))
910 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
911 MGMT_STATUS_NOT_SUPPORTED);
912
913 if (cp->val != 0x00 && cp->val != 0x01)
914 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
915 MGMT_STATUS_INVALID_PARAMS);
916
917 timeout = __le16_to_cpu(cp->timeout);
918 if (!cp->val && timeout > 0)
919 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
920 MGMT_STATUS_INVALID_PARAMS);
921
922 hci_dev_lock(hdev);
923
924 if (!hdev_is_powered(hdev) && timeout > 0) {
925 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
926 MGMT_STATUS_NOT_POWERED);
927 goto failed;
928 }
929
930 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
931 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
932 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
933 MGMT_STATUS_BUSY);
934 goto failed;
935 }
936
937 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
938 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
939 MGMT_STATUS_REJECTED);
940 goto failed;
941 }
942
943 if (!hdev_is_powered(hdev)) {
944 bool changed = false;
945
946 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
947 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
948 changed = true;
949 }
950
951 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
952 if (err < 0)
953 goto failed;
954
955 if (changed)
956 err = new_settings(hdev, sk);
957
958 goto failed;
959 }
960
961 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
962 if (hdev->discov_timeout > 0) {
963 cancel_delayed_work(&hdev->discov_off);
964 hdev->discov_timeout = 0;
965 }
966
967 if (cp->val && timeout > 0) {
968 hdev->discov_timeout = timeout;
969 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
970 msecs_to_jiffies(hdev->discov_timeout * 1000));
971 }
972
973 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
974 goto failed;
975 }
976
977 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
978 if (!cmd) {
979 err = -ENOMEM;
980 goto failed;
981 }
982
983 scan = SCAN_PAGE;
984
985 if (cp->val)
986 scan |= SCAN_INQUIRY;
987 else
988 cancel_delayed_work(&hdev->discov_off);
989
990 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
991 if (err < 0)
992 mgmt_pending_remove(cmd);
993
994 if (cp->val)
995 hdev->discov_timeout = timeout;
996
997 failed:
998 hci_dev_unlock(hdev);
999 return err;
1000 }
1001
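/* Fast connectable switches between interlaced page scan with a 160 ms
 * interval and the standard type with the default 1.28 s interval; the
 * HCI commands are only queued when the controller's current page scan
 * parameters actually differ.
 */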
1002 static void write_fast_connectable(struct hci_request *req, bool enable)
1003 {
1004 struct hci_dev *hdev = req->hdev;
1005 struct hci_cp_write_page_scan_activity acp;
1006 u8 type;
1007
1008 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1009 return;
1010
1011 if (enable) {
1012 type = PAGE_SCAN_TYPE_INTERLACED;
1013
1014 /* 160 msec page scan interval */
1015 acp.interval = __constant_cpu_to_le16(0x0100);
1016 } else {
1017 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1018
1019 /* default 1.28 sec page scan */
1020 acp.interval = __constant_cpu_to_le16(0x0800);
1021 }
1022
1023 acp.window = __constant_cpu_to_le16(0x0012);
1024
1025 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1026 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1027 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1028 sizeof(acp), &acp);
1029
1030 if (hdev->page_scan_type != type)
1031 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1032 }
1033
1034 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1035 {
1036 struct pending_cmd *cmd;
1037
1038 BT_DBG("status 0x%02x", status);
1039
1040 hci_dev_lock(hdev);
1041
1042 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1043 if (!cmd)
1044 goto unlock;
1045
1046 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1047
1048 mgmt_pending_remove(cmd);
1049
1050 unlock:
1051 hci_dev_unlock(hdev);
1052 }
1053
1054 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1055 u16 len)
1056 {
1057 struct mgmt_mode *cp = data;
1058 struct pending_cmd *cmd;
1059 struct hci_request req;
1060 u8 scan;
1061 int err;
1062
1063 BT_DBG("request for %s", hdev->name);
1064
1065 if (!lmp_bredr_capable(hdev))
1066 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1067 MGMT_STATUS_NOT_SUPPORTED);
1068
1069 if (cp->val != 0x00 && cp->val != 0x01)
1070 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1071 MGMT_STATUS_INVALID_PARAMS);
1072
1073 hci_dev_lock(hdev);
1074
1075 if (!hdev_is_powered(hdev)) {
1076 bool changed = false;
1077
1078 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1079 changed = true;
1080
1081 if (cp->val) {
1082 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1083 } else {
1084 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1085 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1086 }
1087
1088 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1089 if (err < 0)
1090 goto failed;
1091
1092 if (changed)
1093 err = new_settings(hdev, sk);
1094
1095 goto failed;
1096 }
1097
1098 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1099 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1100 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1101 MGMT_STATUS_BUSY);
1102 goto failed;
1103 }
1104
1105 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1106 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1107 goto failed;
1108 }
1109
1110 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1111 if (!cmd) {
1112 err = -ENOMEM;
1113 goto failed;
1114 }
1115
1116 if (cp->val) {
1117 scan = SCAN_PAGE;
1118 } else {
1119 scan = 0;
1120
1121 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1122 hdev->discov_timeout > 0)
1123 cancel_delayed_work(&hdev->discov_off);
1124 }
1125
1126 hci_req_init(&req, hdev);
1127
1128 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1129
1130 /* If we're going from non-connectable to connectable or
1131 	 * vice-versa when fast connectable is enabled, ensure that fast
1132 * connectable gets disabled. write_fast_connectable won't do
1133 * anything if the page scan parameters are already what they
1134 * should be.
1135 */
1136 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1137 write_fast_connectable(&req, false);
1138
1139 err = hci_req_run(&req, set_connectable_complete);
1140 if (err < 0)
1141 mgmt_pending_remove(cmd);
1142
1143 failed:
1144 hci_dev_unlock(hdev);
1145 return err;
1146 }
1147
1148 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1149 u16 len)
1150 {
1151 struct mgmt_mode *cp = data;
1152 int err;
1153
1154 BT_DBG("request for %s", hdev->name);
1155
1156 if (cp->val != 0x00 && cp->val != 0x01)
1157 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1158 MGMT_STATUS_INVALID_PARAMS);
1159
1160 hci_dev_lock(hdev);
1161
1162 if (cp->val)
1163 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1164 else
1165 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1166
1167 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1168 if (err < 0)
1169 goto failed;
1170
1171 err = new_settings(hdev, sk);
1172
1173 failed:
1174 hci_dev_unlock(hdev);
1175 return err;
1176 }
1177
1178 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1179 u16 len)
1180 {
1181 struct mgmt_mode *cp = data;
1182 struct pending_cmd *cmd;
1183 u8 val;
1184 int err;
1185
1186 BT_DBG("request for %s", hdev->name);
1187
1188 if (!lmp_bredr_capable(hdev))
1189 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1190 MGMT_STATUS_NOT_SUPPORTED);
1191
1192 if (cp->val != 0x00 && cp->val != 0x01)
1193 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1194 MGMT_STATUS_INVALID_PARAMS);
1195
1196 hci_dev_lock(hdev);
1197
1198 if (!hdev_is_powered(hdev)) {
1199 bool changed = false;
1200
1201 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1202 &hdev->dev_flags)) {
1203 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1204 changed = true;
1205 }
1206
1207 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1208 if (err < 0)
1209 goto failed;
1210
1211 if (changed)
1212 err = new_settings(hdev, sk);
1213
1214 goto failed;
1215 }
1216
1217 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1218 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1219 MGMT_STATUS_BUSY);
1220 goto failed;
1221 }
1222
1223 val = !!cp->val;
1224
1225 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1226 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1227 goto failed;
1228 }
1229
1230 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1231 if (!cmd) {
1232 err = -ENOMEM;
1233 goto failed;
1234 }
1235
1236 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1237 if (err < 0) {
1238 mgmt_pending_remove(cmd);
1239 goto failed;
1240 }
1241
1242 failed:
1243 hci_dev_unlock(hdev);
1244 return err;
1245 }
1246
1247 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1248 {
1249 struct mgmt_mode *cp = data;
1250 struct pending_cmd *cmd;
1251 u8 val;
1252 int err;
1253
1254 BT_DBG("request for %s", hdev->name);
1255
1256 if (!lmp_ssp_capable(hdev))
1257 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1258 MGMT_STATUS_NOT_SUPPORTED);
1259
1260 if (cp->val != 0x00 && cp->val != 0x01)
1261 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1262 MGMT_STATUS_INVALID_PARAMS);
1263
1264 hci_dev_lock(hdev);
1265
1266 val = !!cp->val;
1267
1268 if (!hdev_is_powered(hdev)) {
1269 bool changed = false;
1270
1271 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1272 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1273 changed = true;
1274 }
1275
1276 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1277 if (err < 0)
1278 goto failed;
1279
1280 if (changed)
1281 err = new_settings(hdev, sk);
1282
1283 goto failed;
1284 }
1285
1286 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1287 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1288 MGMT_STATUS_BUSY);
1289 goto failed;
1290 }
1291
1292 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1293 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1294 goto failed;
1295 }
1296
1297 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1298 if (!cmd) {
1299 err = -ENOMEM;
1300 goto failed;
1301 }
1302
1303 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1304 if (err < 0) {
1305 mgmt_pending_remove(cmd);
1306 goto failed;
1307 }
1308
1309 failed:
1310 hci_dev_unlock(hdev);
1311 return err;
1312 }
1313
1314 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1315 {
1316 struct mgmt_mode *cp = data;
1317
1318 BT_DBG("request for %s", hdev->name);
1319
1320 if (!enable_hs)
1321 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1322 MGMT_STATUS_NOT_SUPPORTED);
1323
1324 if (cp->val != 0x00 && cp->val != 0x01)
1325 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1326 MGMT_STATUS_INVALID_PARAMS);
1327
1328 if (cp->val)
1329 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1330 else
1331 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1332
1333 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1334 }
1335
1336 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1337 {
1338 struct mgmt_mode *cp = data;
1339 struct hci_cp_write_le_host_supported hci_cp;
1340 struct pending_cmd *cmd;
1341 int err;
1342 u8 val, enabled;
1343
1344 BT_DBG("request for %s", hdev->name);
1345
1346 if (!lmp_le_capable(hdev))
1347 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1348 MGMT_STATUS_NOT_SUPPORTED);
1349
1350 if (cp->val != 0x00 && cp->val != 0x01)
1351 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1352 MGMT_STATUS_INVALID_PARAMS);
1353
1354 hci_dev_lock(hdev);
1355
1356 val = !!cp->val;
1357 enabled = lmp_host_le_capable(hdev);
1358
1359 if (!hdev_is_powered(hdev) || val == enabled) {
1360 bool changed = false;
1361
1362 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1363 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1364 changed = true;
1365 }
1366
1367 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1368 if (err < 0)
1369 goto unlock;
1370
1371 if (changed)
1372 err = new_settings(hdev, sk);
1373
1374 goto unlock;
1375 }
1376
1377 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1378 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1379 MGMT_STATUS_BUSY);
1380 goto unlock;
1381 }
1382
1383 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1384 if (!cmd) {
1385 err = -ENOMEM;
1386 goto unlock;
1387 }
1388
1389 memset(&hci_cp, 0, sizeof(hci_cp));
1390
1391 if (val) {
1392 hci_cp.le = val;
1393 hci_cp.simul = lmp_le_br_capable(hdev);
1394 }
1395
1396 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1397 &hci_cp);
1398 if (err < 0)
1399 mgmt_pending_remove(cmd);
1400
1401 unlock:
1402 hci_dev_unlock(hdev);
1403 return err;
1404 }
1405
1406 /* This is a helper function to test for pending mgmt commands that can
1407 * cause CoD or EIR HCI commands. We can only allow one such pending
1408 * mgmt command at a time since otherwise we cannot easily track what
1409  * the current values are and will be, and based on that decide whether
1410  * a new HCI command needs to be sent and, if so, with what value.
1411 */
1412 static bool pending_eir_or_class(struct hci_dev *hdev)
1413 {
1414 struct pending_cmd *cmd;
1415
1416 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1417 switch (cmd->opcode) {
1418 case MGMT_OP_ADD_UUID:
1419 case MGMT_OP_REMOVE_UUID:
1420 case MGMT_OP_SET_DEV_CLASS:
1421 case MGMT_OP_SET_POWERED:
1422 return true;
1423 }
1424 }
1425
1426 return false;
1427 }
1428
1429 static const u8 bluetooth_base_uuid[] = {
1430 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1431 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1432 };
1433
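/* UUIDs are stored here in little-endian byte order: if the first 12
 * bytes do not match the Bluetooth Base UUID the value needs the full
 * 128-bit form, otherwise bytes 12-15 carry the 32- or 16-bit short
 * form.
 */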
1434 static u8 get_uuid_size(const u8 *uuid)
1435 {
1436 u32 val;
1437
1438 if (memcmp(uuid, bluetooth_base_uuid, 12))
1439 return 128;
1440
1441 val = get_unaligned_le32(&uuid[12]);
1442 if (val > 0xffff)
1443 return 32;
1444
1445 return 16;
1446 }
1447
1448 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1449 {
1450 struct pending_cmd *cmd;
1451
1452 hci_dev_lock(hdev);
1453
1454 cmd = mgmt_pending_find(mgmt_op, hdev);
1455 if (!cmd)
1456 goto unlock;
1457
1458 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1459 hdev->dev_class, 3);
1460
1461 mgmt_pending_remove(cmd);
1462
1463 unlock:
1464 hci_dev_unlock(hdev);
1465 }
1466
1467 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1468 {
1469 BT_DBG("status 0x%02x", status);
1470
1471 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1472 }
1473
1474 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1475 {
1476 struct mgmt_cp_add_uuid *cp = data;
1477 struct pending_cmd *cmd;
1478 struct hci_request req;
1479 struct bt_uuid *uuid;
1480 int err;
1481
1482 BT_DBG("request for %s", hdev->name);
1483
1484 hci_dev_lock(hdev);
1485
1486 if (pending_eir_or_class(hdev)) {
1487 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1488 MGMT_STATUS_BUSY);
1489 goto failed;
1490 }
1491
1492 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1493 if (!uuid) {
1494 err = -ENOMEM;
1495 goto failed;
1496 }
1497
1498 memcpy(uuid->uuid, cp->uuid, 16);
1499 uuid->svc_hint = cp->svc_hint;
1500 uuid->size = get_uuid_size(cp->uuid);
1501
1502 list_add_tail(&uuid->list, &hdev->uuids);
1503
1504 hci_req_init(&req, hdev);
1505
1506 update_class(&req);
1507 update_eir(&req);
1508
1509 err = hci_req_run(&req, add_uuid_complete);
1510 if (err < 0) {
1511 if (err != -ENODATA)
1512 goto failed;
1513
1514 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1515 hdev->dev_class, 3);
1516 goto failed;
1517 }
1518
1519 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1520 if (!cmd) {
1521 err = -ENOMEM;
1522 goto failed;
1523 }
1524
1525 err = 0;
1526
1527 failed:
1528 hci_dev_unlock(hdev);
1529 return err;
1530 }
1531
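/* While HCI_SERVICE_CACHE is set, update_class() and update_eir() are
 * deferred; service_cache_off() clears the bit and issues the pending
 * updates once CACHE_TIMEOUT expires.
 */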
1532 static bool enable_service_cache(struct hci_dev *hdev)
1533 {
1534 if (!hdev_is_powered(hdev))
1535 return false;
1536
1537 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1538 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1539 CACHE_TIMEOUT);
1540 return true;
1541 }
1542
1543 return false;
1544 }
1545
1546 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1547 {
1548 BT_DBG("status 0x%02x", status);
1549
1550 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1551 }
1552
1553 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1554 u16 len)
1555 {
1556 struct mgmt_cp_remove_uuid *cp = data;
1557 struct pending_cmd *cmd;
1558 struct bt_uuid *match, *tmp;
1559 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1560 struct hci_request req;
1561 int err, found;
1562
1563 BT_DBG("request for %s", hdev->name);
1564
1565 hci_dev_lock(hdev);
1566
1567 if (pending_eir_or_class(hdev)) {
1568 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1569 MGMT_STATUS_BUSY);
1570 goto unlock;
1571 }
1572
1573 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1574 err = hci_uuids_clear(hdev);
1575
1576 if (enable_service_cache(hdev)) {
1577 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1578 0, hdev->dev_class, 3);
1579 goto unlock;
1580 }
1581
1582 goto update_class;
1583 }
1584
1585 found = 0;
1586
1587 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1588 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1589 continue;
1590
1591 list_del(&match->list);
1592 kfree(match);
1593 found++;
1594 }
1595
1596 if (found == 0) {
1597 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1598 MGMT_STATUS_INVALID_PARAMS);
1599 goto unlock;
1600 }
1601
1602 update_class:
1603 hci_req_init(&req, hdev);
1604
1605 update_class(&req);
1606 update_eir(&req);
1607
1608 err = hci_req_run(&req, remove_uuid_complete);
1609 if (err < 0) {
1610 if (err != -ENODATA)
1611 goto unlock;
1612
1613 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1614 hdev->dev_class, 3);
1615 goto unlock;
1616 }
1617
1618 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1619 if (!cmd) {
1620 err = -ENOMEM;
1621 goto unlock;
1622 }
1623
1624 err = 0;
1625
1626 unlock:
1627 hci_dev_unlock(hdev);
1628 return err;
1629 }
1630
1631 static void set_class_complete(struct hci_dev *hdev, u8 status)
1632 {
1633 BT_DBG("status 0x%02x", status);
1634
1635 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
1636 }
1637
1638 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1639 u16 len)
1640 {
1641 struct mgmt_cp_set_dev_class *cp = data;
1642 struct pending_cmd *cmd;
1643 struct hci_request req;
1644 int err;
1645
1646 BT_DBG("request for %s", hdev->name);
1647
1648 if (!lmp_bredr_capable(hdev))
1649 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1650 MGMT_STATUS_NOT_SUPPORTED);
1651
1652 hci_dev_lock(hdev);
1653
1654 if (pending_eir_or_class(hdev)) {
1655 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1656 MGMT_STATUS_BUSY);
1657 goto unlock;
1658 }
1659
1660 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1661 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1662 MGMT_STATUS_INVALID_PARAMS);
1663 goto unlock;
1664 }
1665
1666 hdev->major_class = cp->major;
1667 hdev->minor_class = cp->minor;
1668
1669 if (!hdev_is_powered(hdev)) {
1670 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1671 hdev->dev_class, 3);
1672 goto unlock;
1673 }
1674
1675 hci_req_init(&req, hdev);
1676
1677 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1678 hci_dev_unlock(hdev);
1679 cancel_delayed_work_sync(&hdev->service_cache);
1680 hci_dev_lock(hdev);
1681 update_eir(&req);
1682 }
1683
1684 update_class(&req);
1685
1686 err = hci_req_run(&req, set_class_complete);
1687 if (err < 0) {
1688 if (err != -ENODATA)
1689 goto unlock;
1690
1691 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1692 hdev->dev_class, 3);
1693 goto unlock;
1694 }
1695
1696 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1697 if (!cmd) {
1698 err = -ENOMEM;
1699 goto unlock;
1700 }
1701
1702 err = 0;
1703
1704 unlock:
1705 hci_dev_unlock(hdev);
1706 return err;
1707 }
1708
1709 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1710 u16 len)
1711 {
1712 struct mgmt_cp_load_link_keys *cp = data;
1713 u16 key_count, expected_len;
1714 int i;
1715
1716 key_count = __le16_to_cpu(cp->key_count);
1717
1718 expected_len = sizeof(*cp) + key_count *
1719 sizeof(struct mgmt_link_key_info);
1720 if (expected_len != len) {
1721 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1722 len, expected_len);
1723 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1724 MGMT_STATUS_INVALID_PARAMS);
1725 }
1726
1727 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1728 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1729 MGMT_STATUS_INVALID_PARAMS);
1730
1731 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1732 key_count);
1733
1734 for (i = 0; i < key_count; i++) {
1735 struct mgmt_link_key_info *key = &cp->keys[i];
1736
1737 if (key->addr.type != BDADDR_BREDR)
1738 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1739 MGMT_STATUS_INVALID_PARAMS);
1740 }
1741
1742 hci_dev_lock(hdev);
1743
1744 hci_link_keys_clear(hdev);
1745
1746 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1747
1748 if (cp->debug_keys)
1749 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1750 else
1751 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1752
1753 for (i = 0; i < key_count; i++) {
1754 struct mgmt_link_key_info *key = &cp->keys[i];
1755
1756 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1757 key->type, key->pin_len);
1758 }
1759
1760 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1761
1762 hci_dev_unlock(hdev);
1763
1764 return 0;
1765 }
1766
1767 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1768 u8 addr_type, struct sock *skip_sk)
1769 {
1770 struct mgmt_ev_device_unpaired ev;
1771
1772 bacpy(&ev.addr.bdaddr, bdaddr);
1773 ev.addr.type = addr_type;
1774
1775 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
1776 skip_sk);
1777 }
1778
1779 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1780 u16 len)
1781 {
1782 struct mgmt_cp_unpair_device *cp = data;
1783 struct mgmt_rp_unpair_device rp;
1784 struct hci_cp_disconnect dc;
1785 struct pending_cmd *cmd;
1786 struct hci_conn *conn;
1787 int err;
1788
1789 memset(&rp, 0, sizeof(rp));
1790 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1791 rp.addr.type = cp->addr.type;
1792
1793 if (!bdaddr_type_is_valid(cp->addr.type))
1794 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1795 MGMT_STATUS_INVALID_PARAMS,
1796 &rp, sizeof(rp));
1797
1798 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1799 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1800 MGMT_STATUS_INVALID_PARAMS,
1801 &rp, sizeof(rp));
1802
1803 hci_dev_lock(hdev);
1804
1805 if (!hdev_is_powered(hdev)) {
1806 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1807 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1808 goto unlock;
1809 }
1810
1811 if (cp->addr.type == BDADDR_BREDR)
1812 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1813 else
1814 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1815
1816 if (err < 0) {
1817 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1818 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1819 goto unlock;
1820 }
1821
1822 if (cp->disconnect) {
1823 if (cp->addr.type == BDADDR_BREDR)
1824 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1825 &cp->addr.bdaddr);
1826 else
1827 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1828 &cp->addr.bdaddr);
1829 } else {
1830 conn = NULL;
1831 }
1832
1833 if (!conn) {
1834 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1835 &rp, sizeof(rp));
1836 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1837 goto unlock;
1838 }
1839
1840 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1841 sizeof(*cp));
1842 if (!cmd) {
1843 err = -ENOMEM;
1844 goto unlock;
1845 }
1846
1847 dc.handle = cpu_to_le16(conn->handle);
1848 dc.reason = 0x13; /* Remote User Terminated Connection */
1849 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1850 if (err < 0)
1851 mgmt_pending_remove(cmd);
1852
1853 unlock:
1854 hci_dev_unlock(hdev);
1855 return err;
1856 }
1857
1858 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1859 u16 len)
1860 {
1861 struct mgmt_cp_disconnect *cp = data;
1862 struct mgmt_rp_disconnect rp;
1863 struct hci_cp_disconnect dc;
1864 struct pending_cmd *cmd;
1865 struct hci_conn *conn;
1866 int err;
1867
1868 BT_DBG("");
1869
1870 memset(&rp, 0, sizeof(rp));
1871 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1872 rp.addr.type = cp->addr.type;
1873
1874 if (!bdaddr_type_is_valid(cp->addr.type))
1875 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1876 MGMT_STATUS_INVALID_PARAMS,
1877 &rp, sizeof(rp));
1878
1879 hci_dev_lock(hdev);
1880
1881 if (!test_bit(HCI_UP, &hdev->flags)) {
1882 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1883 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1884 goto failed;
1885 }
1886
1887 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1888 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1889 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1890 goto failed;
1891 }
1892
1893 if (cp->addr.type == BDADDR_BREDR)
1894 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1895 &cp->addr.bdaddr);
1896 else
1897 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1898
1899 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1900 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1901 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1902 goto failed;
1903 }
1904
1905 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1906 if (!cmd) {
1907 err = -ENOMEM;
1908 goto failed;
1909 }
1910
1911 dc.handle = cpu_to_le16(conn->handle);
1912 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1913
1914 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1915 if (err < 0)
1916 mgmt_pending_remove(cmd);
1917
1918 failed:
1919 hci_dev_unlock(hdev);
1920 return err;
1921 }
1922
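/* Map an HCI link type plus LE address type onto the BDADDR_* address
 * types used in mgmt_addr_info.
 */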
1923 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1924 {
1925 switch (link_type) {
1926 case LE_LINK:
1927 switch (addr_type) {
1928 case ADDR_LE_DEV_PUBLIC:
1929 return BDADDR_LE_PUBLIC;
1930
1931 default:
1932 /* Fallback to LE Random address type */
1933 return BDADDR_LE_RANDOM;
1934 }
1935
1936 default:
1937 /* Fallback to BR/EDR type */
1938 return BDADDR_BREDR;
1939 }
1940 }
1941
1942 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1943 u16 data_len)
1944 {
1945 struct mgmt_rp_get_connections *rp;
1946 struct hci_conn *c;
1947 size_t rp_len;
1948 int err;
1949 u16 i;
1950
1951 BT_DBG("");
1952
1953 hci_dev_lock(hdev);
1954
1955 if (!hdev_is_powered(hdev)) {
1956 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1957 MGMT_STATUS_NOT_POWERED);
1958 goto unlock;
1959 }
1960
1961 i = 0;
1962 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1963 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1964 i++;
1965 }
1966
1967 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1968 rp = kmalloc(rp_len, GFP_KERNEL);
1969 if (!rp) {
1970 err = -ENOMEM;
1971 goto unlock;
1972 }
1973
1974 i = 0;
1975 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1976 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1977 continue;
1978 bacpy(&rp->addr[i].bdaddr, &c->dst);
1979 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1980 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1981 continue;
1982 i++;
1983 }
1984
1985 rp->conn_count = cpu_to_le16(i);
1986
1987 /* Recalculate length in case of filtered SCO connections, etc */
1988 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1989
1990 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1991 rp_len);
1992
1993 kfree(rp);
1994
1995 unlock:
1996 hci_dev_unlock(hdev);
1997 return err;
1998 }
1999
2000 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2001 struct mgmt_cp_pin_code_neg_reply *cp)
2002 {
2003 struct pending_cmd *cmd;
2004 int err;
2005
2006 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2007 sizeof(*cp));
2008 if (!cmd)
2009 return -ENOMEM;
2010
2011 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2012 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2013 if (err < 0)
2014 mgmt_pending_remove(cmd);
2015
2016 return err;
2017 }
2018
2019 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2020 u16 len)
2021 {
2022 struct hci_conn *conn;
2023 struct mgmt_cp_pin_code_reply *cp = data;
2024 struct hci_cp_pin_code_reply reply;
2025 struct pending_cmd *cmd;
2026 int err;
2027
2028 BT_DBG("");
2029
2030 hci_dev_lock(hdev);
2031
2032 if (!hdev_is_powered(hdev)) {
2033 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2034 MGMT_STATUS_NOT_POWERED);
2035 goto failed;
2036 }
2037
2038 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2039 if (!conn) {
2040 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2041 MGMT_STATUS_NOT_CONNECTED);
2042 goto failed;
2043 }
2044
2045 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2046 struct mgmt_cp_pin_code_neg_reply ncp;
2047
2048 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2049
2050 BT_ERR("PIN code is not 16 bytes long");
2051
2052 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2053 if (err >= 0)
2054 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2055 MGMT_STATUS_INVALID_PARAMS);
2056
2057 goto failed;
2058 }
2059
2060 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2061 if (!cmd) {
2062 err = -ENOMEM;
2063 goto failed;
2064 }
2065
2066 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2067 reply.pin_len = cp->pin_len;
2068 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2069
2070 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2071 if (err < 0)
2072 mgmt_pending_remove(cmd);
2073
2074 failed:
2075 hci_dev_unlock(hdev);
2076 return err;
2077 }
2078
2079 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2080 u16 len)
2081 {
2082 struct mgmt_cp_set_io_capability *cp = data;
2083
2084 BT_DBG("");
2085
2086 hci_dev_lock(hdev);
2087
2088 hdev->io_capability = cp->io_capability;
2089
2090 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2091 hdev->io_capability);
2092
2093 hci_dev_unlock(hdev);
2094
2095 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2096 0);
2097 }
2098
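/* Look up the pending PAIR_DEVICE command whose user_data refers to
 * this connection, if any.
 */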
2099 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2100 {
2101 struct hci_dev *hdev = conn->hdev;
2102 struct pending_cmd *cmd;
2103
2104 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2105 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2106 continue;
2107
2108 if (cmd->user_data != conn)
2109 continue;
2110
2111 return cmd;
2112 }
2113
2114 return NULL;
2115 }
2116
2117 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2118 {
2119 struct mgmt_rp_pair_device rp;
2120 struct hci_conn *conn = cmd->user_data;
2121
2122 bacpy(&rp.addr.bdaddr, &conn->dst);
2123 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2124
2125 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2126 &rp, sizeof(rp));
2127
2128 /* So we don't get further callbacks for this connection */
2129 conn->connect_cfm_cb = NULL;
2130 conn->security_cfm_cb = NULL;
2131 conn->disconn_cfm_cb = NULL;
2132
2133 hci_conn_drop(conn);
2134
2135 mgmt_pending_remove(cmd);
2136 }
2137
2138 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2139 {
2140 struct pending_cmd *cmd;
2141
2142 BT_DBG("status %u", status);
2143
2144 cmd = find_pairing(conn);
2145 if (!cmd)
2146 BT_DBG("Unable to find a pending command");
2147 else
2148 pairing_complete(cmd, mgmt_status(status));
2149 }
2150
2151 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2152 {
2153 struct pending_cmd *cmd;
2154
2155 BT_DBG("status %u", status);
2156
2157 if (!status)
2158 return;
2159
2160 cmd = find_pairing(conn);
2161 if (!cmd)
2162 BT_DBG("Unable to find a pending command");
2163 else
2164 pairing_complete(cmd, mgmt_status(status));
2165 }
2166
2167 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2168 u16 len)
2169 {
2170 struct mgmt_cp_pair_device *cp = data;
2171 struct mgmt_rp_pair_device rp;
2172 struct pending_cmd *cmd;
2173 u8 sec_level, auth_type;
2174 struct hci_conn *conn;
2175 int err;
2176
2177 BT_DBG("");
2178
2179 memset(&rp, 0, sizeof(rp));
2180 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2181 rp.addr.type = cp->addr.type;
2182
2183 if (!bdaddr_type_is_valid(cp->addr.type))
2184 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2185 MGMT_STATUS_INVALID_PARAMS,
2186 &rp, sizeof(rp));
2187
2188 hci_dev_lock(hdev);
2189
2190 if (!hdev_is_powered(hdev)) {
2191 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2192 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2193 goto unlock;
2194 }
2195
2196 sec_level = BT_SECURITY_MEDIUM;
2197 if (cp->io_cap == 0x03)
2198 auth_type = HCI_AT_DEDICATED_BONDING;
2199 else
2200 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2201
2202 if (cp->addr.type == BDADDR_BREDR)
2203 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2204 cp->addr.type, sec_level, auth_type);
2205 else
2206 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2207 cp->addr.type, sec_level, auth_type);
2208
2209 if (IS_ERR(conn)) {
2210 int status;
2211
2212 if (PTR_ERR(conn) == -EBUSY)
2213 status = MGMT_STATUS_BUSY;
2214 else
2215 status = MGMT_STATUS_CONNECT_FAILED;
2216
2217 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2218 status, &rp,
2219 sizeof(rp));
2220 goto unlock;
2221 }
2222
2223 if (conn->connect_cfm_cb) {
2224 hci_conn_drop(conn);
2225 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2226 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2227 goto unlock;
2228 }
2229
2230 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2231 if (!cmd) {
2232 err = -ENOMEM;
2233 hci_conn_drop(conn);
2234 goto unlock;
2235 }
2236
2237 /* For LE, just connecting isn't a proof that the pairing finished */
2238 if (cp->addr.type == BDADDR_BREDR)
2239 conn->connect_cfm_cb = pairing_complete_cb;
2240 else
2241 conn->connect_cfm_cb = le_connect_complete_cb;
2242
2243 conn->security_cfm_cb = pairing_complete_cb;
2244 conn->disconn_cfm_cb = pairing_complete_cb;
2245 conn->io_capability = cp->io_cap;
2246 cmd->user_data = conn;
2247
2248 if (conn->state == BT_CONNECTED &&
2249 hci_conn_security(conn, sec_level, auth_type))
2250 pairing_complete(cmd, 0);
2251
2252 err = 0;
2253
2254 unlock:
2255 hci_dev_unlock(hdev);
2256 return err;
2257 }
2258
2259 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2260 u16 len)
2261 {
2262 struct mgmt_addr_info *addr = data;
2263 struct pending_cmd *cmd;
2264 struct hci_conn *conn;
2265 int err;
2266
2267 BT_DBG("");
2268
2269 hci_dev_lock(hdev);
2270
2271 if (!hdev_is_powered(hdev)) {
2272 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2273 MGMT_STATUS_NOT_POWERED);
2274 goto unlock;
2275 }
2276
2277 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2278 if (!cmd) {
2279 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2280 MGMT_STATUS_INVALID_PARAMS);
2281 goto unlock;
2282 }
2283
2284 conn = cmd->user_data;
2285
2286 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2287 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2288 MGMT_STATUS_INVALID_PARAMS);
2289 goto unlock;
2290 }
2291
2292 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2293
2294 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2295 addr, sizeof(*addr));
2296 unlock:
2297 hci_dev_unlock(hdev);
2298 return err;
2299 }
2300
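/* Common helper for the user confirm/passkey and PIN code negative
 * reply commands: LE responses are routed through SMP, BR/EDR
 * responses are sent as the matching HCI command.
 */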
2301 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2302 struct mgmt_addr_info *addr, u16 mgmt_op,
2303 u16 hci_op, __le32 passkey)
2304 {
2305 struct pending_cmd *cmd;
2306 struct hci_conn *conn;
2307 int err;
2308
2309 hci_dev_lock(hdev);
2310
2311 if (!hdev_is_powered(hdev)) {
2312 err = cmd_complete(sk, hdev->id, mgmt_op,
2313 MGMT_STATUS_NOT_POWERED, addr,
2314 sizeof(*addr));
2315 goto done;
2316 }
2317
2318 if (addr->type == BDADDR_BREDR)
2319 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2320 else
2321 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2322
2323 if (!conn) {
2324 err = cmd_complete(sk, hdev->id, mgmt_op,
2325 MGMT_STATUS_NOT_CONNECTED, addr,
2326 sizeof(*addr));
2327 goto done;
2328 }
2329
2330 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2331 /* Continue with pairing via SMP */
2332 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2333
2334 if (!err)
2335 err = cmd_complete(sk, hdev->id, mgmt_op,
2336 MGMT_STATUS_SUCCESS, addr,
2337 sizeof(*addr));
2338 else
2339 err = cmd_complete(sk, hdev->id, mgmt_op,
2340 MGMT_STATUS_FAILED, addr,
2341 sizeof(*addr));
2342
2343 goto done;
2344 }
2345
2346 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2347 if (!cmd) {
2348 err = -ENOMEM;
2349 goto done;
2350 }
2351
2352 /* Continue with pairing via HCI */
2353 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2354 struct hci_cp_user_passkey_reply cp;
2355
2356 bacpy(&cp.bdaddr, &addr->bdaddr);
2357 cp.passkey = passkey;
2358 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2359 } else
2360 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2361 &addr->bdaddr);
2362
2363 if (err < 0)
2364 mgmt_pending_remove(cmd);
2365
2366 done:
2367 hci_dev_unlock(hdev);
2368 return err;
2369 }
2370
2371 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2372 void *data, u16 len)
2373 {
2374 struct mgmt_cp_pin_code_neg_reply *cp = data;
2375
2376 BT_DBG("");
2377
2378 return user_pairing_resp(sk, hdev, &cp->addr,
2379 MGMT_OP_PIN_CODE_NEG_REPLY,
2380 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2381 }
2382
2383 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2384 u16 len)
2385 {
2386 struct mgmt_cp_user_confirm_reply *cp = data;
2387
2388 BT_DBG("");
2389
2390 if (len != sizeof(*cp))
2391 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2392 MGMT_STATUS_INVALID_PARAMS);
2393
2394 return user_pairing_resp(sk, hdev, &cp->addr,
2395 MGMT_OP_USER_CONFIRM_REPLY,
2396 HCI_OP_USER_CONFIRM_REPLY, 0);
2397 }
2398
2399 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2400 void *data, u16 len)
2401 {
2402 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2403
2404 BT_DBG("");
2405
2406 return user_pairing_resp(sk, hdev, &cp->addr,
2407 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2408 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2409 }
2410
2411 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2412 u16 len)
2413 {
2414 struct mgmt_cp_user_passkey_reply *cp = data;
2415
2416 BT_DBG("");
2417
2418 return user_pairing_resp(sk, hdev, &cp->addr,
2419 MGMT_OP_USER_PASSKEY_REPLY,
2420 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2421 }
2422
2423 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2424 void *data, u16 len)
2425 {
2426 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2427
2428 BT_DBG("");
2429
2430 return user_pairing_resp(sk, hdev, &cp->addr,
2431 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2432 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2433 }
2434
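/* Queue an HCI Write Local Name command carrying the current dev_name. */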
2435 static void update_name(struct hci_request *req)
2436 {
2437 struct hci_dev *hdev = req->hdev;
2438 struct hci_cp_write_local_name cp;
2439
2440 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2441
2442 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2443 }
2444
2445 static void set_name_complete(struct hci_dev *hdev, u8 status)
2446 {
2447 struct mgmt_cp_set_local_name *cp;
2448 struct pending_cmd *cmd;
2449
2450 BT_DBG("status 0x%02x", status);
2451
2452 hci_dev_lock(hdev);
2453
2454 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2455 if (!cmd)
2456 goto unlock;
2457
2458 cp = cmd->param;
2459
2460 if (status)
2461 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2462 mgmt_status(status));
2463 else
2464 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2465 cp, sizeof(*cp));
2466
2467 mgmt_pending_remove(cmd);
2468
2469 unlock:
2470 hci_dev_unlock(hdev);
2471 }
2472
2473 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2474 u16 len)
2475 {
2476 struct mgmt_cp_set_local_name *cp = data;
2477 struct pending_cmd *cmd;
2478 struct hci_request req;
2479 int err;
2480
2481 BT_DBG("");
2482
2483 hci_dev_lock(hdev);
2484
2485 /* If the old values are the same as the new ones just return a
2486 * direct command complete event.
2487 */
2488 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2489 !memcmp(hdev->short_name, cp->short_name,
2490 sizeof(hdev->short_name))) {
2491 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2492 data, len);
2493 goto failed;
2494 }
2495
2496 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2497
2498 if (!hdev_is_powered(hdev)) {
2499 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2500
2501 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2502 data, len);
2503 if (err < 0)
2504 goto failed;
2505
2506 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2507 sk);
2508
2509 goto failed;
2510 }
2511
2512 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2513 if (!cmd) {
2514 err = -ENOMEM;
2515 goto failed;
2516 }
2517
2518 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2519
2520 hci_req_init(&req, hdev);
2521
2522 if (lmp_bredr_capable(hdev)) {
2523 update_name(&req);
2524 update_eir(&req);
2525 }
2526
2527 if (lmp_le_capable(hdev))
2528 hci_update_ad(&req);
2529
2530 err = hci_req_run(&req, set_name_complete);
2531 if (err < 0)
2532 mgmt_pending_remove(cmd);
2533
2534 failed:
2535 hci_dev_unlock(hdev);
2536 return err;
2537 }
2538
2539 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2540 void *data, u16 data_len)
2541 {
2542 struct pending_cmd *cmd;
2543 int err;
2544
2545 BT_DBG("%s", hdev->name);
2546
2547 hci_dev_lock(hdev);
2548
2549 if (!hdev_is_powered(hdev)) {
2550 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2551 MGMT_STATUS_NOT_POWERED);
2552 goto unlock;
2553 }
2554
2555 if (!lmp_ssp_capable(hdev)) {
2556 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2557 MGMT_STATUS_NOT_SUPPORTED);
2558 goto unlock;
2559 }
2560
2561 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2562 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2563 MGMT_STATUS_BUSY);
2564 goto unlock;
2565 }
2566
2567 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2568 if (!cmd) {
2569 err = -ENOMEM;
2570 goto unlock;
2571 }
2572
2573 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2574 if (err < 0)
2575 mgmt_pending_remove(cmd);
2576
2577 unlock:
2578 hci_dev_unlock(hdev);
2579 return err;
2580 }
2581
2582 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2583 void *data, u16 len)
2584 {
2585 struct mgmt_cp_add_remote_oob_data *cp = data;
2586 u8 status;
2587 int err;
2588
2589 BT_DBG("%s", hdev->name);
2590
2591 hci_dev_lock(hdev);
2592
2593 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2594 cp->randomizer);
2595 if (err < 0)
2596 status = MGMT_STATUS_FAILED;
2597 else
2598 status = MGMT_STATUS_SUCCESS;
2599
2600 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2601 &cp->addr, sizeof(cp->addr));
2602
2603 hci_dev_unlock(hdev);
2604 return err;
2605 }
2606
2607 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2608 void *data, u16 len)
2609 {
2610 struct mgmt_cp_remove_remote_oob_data *cp = data;
2611 u8 status;
2612 int err;
2613
2614 BT_DBG("%s", hdev->name);
2615
2616 hci_dev_lock(hdev);
2617
2618 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2619 if (err < 0)
2620 status = MGMT_STATUS_INVALID_PARAMS;
2621 else
2622 status = MGMT_STATUS_SUCCESS;
2623
2624 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2625 status, &cp->addr, sizeof(cp->addr));
2626
2627 hci_dev_unlock(hdev);
2628 return err;
2629 }
2630
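/* Start the BR/EDR inquiry phase of an interleaved (BR/EDR + LE)
 * discovery run; reset the discovery state if the inquiry cannot be
 * started.
 */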
2631 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2632 {
2633 int err;
2634
2635 BT_DBG("%s", hdev->name);
2636
2637 hci_dev_lock(hdev);
2638
2639 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2640 if (err < 0)
2641 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2642
2643 hci_dev_unlock(hdev);
2644
2645 return err;
2646 }
2647
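/* Start Discovery handler: validate the requested discovery type
 * against the controller capabilities and start an inquiry, an LE scan
 * or both.
 */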
2648 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2649 void *data, u16 len)
2650 {
2651 struct mgmt_cp_start_discovery *cp = data;
2652 struct pending_cmd *cmd;
2653 int err;
2654
2655 BT_DBG("%s", hdev->name);
2656
2657 hci_dev_lock(hdev);
2658
2659 if (!hdev_is_powered(hdev)) {
2660 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2661 MGMT_STATUS_NOT_POWERED);
2662 goto failed;
2663 }
2664
2665 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2666 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2667 MGMT_STATUS_BUSY);
2668 goto failed;
2669 }
2670
2671 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2672 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2673 MGMT_STATUS_BUSY);
2674 goto failed;
2675 }
2676
2677 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2678 if (!cmd) {
2679 err = -ENOMEM;
2680 goto failed;
2681 }
2682
2683 hdev->discovery.type = cp->type;
2684
2685 switch (hdev->discovery.type) {
2686 case DISCOV_TYPE_BREDR:
2687 if (!lmp_bredr_capable(hdev)) {
2688 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2689 MGMT_STATUS_NOT_SUPPORTED);
2690 mgmt_pending_remove(cmd);
2691 goto failed;
2692 }
2693
2694 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2695 break;
2696
2697 case DISCOV_TYPE_LE:
2698 if (!lmp_host_le_capable(hdev)) {
2699 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2700 MGMT_STATUS_NOT_SUPPORTED);
2701 mgmt_pending_remove(cmd);
2702 goto failed;
2703 }
2704
2705 err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
2706 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2707 break;
2708
2709 case DISCOV_TYPE_INTERLEAVED:
2710 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2711 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2712 MGMT_STATUS_NOT_SUPPORTED);
2713 mgmt_pending_remove(cmd);
2714 goto failed;
2715 }
2716
2717 err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
2718 LE_SCAN_WIN, LE_SCAN_TIMEOUT_BREDR_LE);
2719 break;
2720
2721 default:
2722 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2723 MGMT_STATUS_INVALID_PARAMS);
2724 mgmt_pending_remove(cmd);
2725 goto failed;
2726 }
2727
2728 if (err < 0)
2729 mgmt_pending_remove(cmd);
2730 else
2731 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2732
2733 failed:
2734 hci_dev_unlock(hdev);
2735 return err;
2736 }
2737
2738 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2739 u16 len)
2740 {
2741 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2742 struct pending_cmd *cmd;
2743 struct hci_cp_remote_name_req_cancel cp;
2744 struct inquiry_entry *e;
2745 int err;
2746
2747 BT_DBG("%s", hdev->name);
2748
2749 hci_dev_lock(hdev);
2750
2751 if (!hci_discovery_active(hdev)) {
2752 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2753 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2754 sizeof(mgmt_cp->type));
2755 goto unlock;
2756 }
2757
2758 if (hdev->discovery.type != mgmt_cp->type) {
2759 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2760 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2761 sizeof(mgmt_cp->type));
2762 goto unlock;
2763 }
2764
2765 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2766 if (!cmd) {
2767 err = -ENOMEM;
2768 goto unlock;
2769 }
2770
2771 switch (hdev->discovery.state) {
2772 case DISCOVERY_FINDING:
2773 if (test_bit(HCI_INQUIRY, &hdev->flags))
2774 err = hci_cancel_inquiry(hdev);
2775 else
2776 err = hci_cancel_le_scan(hdev);
2777
2778 break;
2779
2780 case DISCOVERY_RESOLVING:
2781 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2782 NAME_PENDING);
2783 if (!e) {
2784 mgmt_pending_remove(cmd);
2785 err = cmd_complete(sk, hdev->id,
2786 MGMT_OP_STOP_DISCOVERY, 0,
2787 &mgmt_cp->type,
2788 sizeof(mgmt_cp->type));
2789 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2790 goto unlock;
2791 }
2792
2793 bacpy(&cp.bdaddr, &e->data.bdaddr);
2794 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2795 sizeof(cp), &cp);
2796
2797 break;
2798
2799 default:
2800 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2801 err = -EFAULT;
2802 }
2803
2804 if (err < 0)
2805 mgmt_pending_remove(cmd);
2806 else
2807 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2808
2809 unlock:
2810 hci_dev_unlock(hdev);
2811 return err;
2812 }
2813
2814 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2815 u16 len)
2816 {
2817 struct mgmt_cp_confirm_name *cp = data;
2818 struct inquiry_entry *e;
2819 int err;
2820
2821 BT_DBG("%s", hdev->name);
2822
2823 hci_dev_lock(hdev);
2824
2825 if (!hci_discovery_active(hdev)) {
2826 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2827 MGMT_STATUS_FAILED);
2828 goto failed;
2829 }
2830
2831 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2832 if (!e) {
2833 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2834 MGMT_STATUS_INVALID_PARAMS);
2835 goto failed;
2836 }
2837
2838 if (cp->name_known) {
2839 e->name_state = NAME_KNOWN;
2840 list_del(&e->list);
2841 } else {
2842 e->name_state = NAME_NEEDED;
2843 hci_inquiry_cache_update_resolve(hdev, e);
2844 }
2845
2846 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2847 sizeof(cp->addr));
2848
2849 failed:
2850 hci_dev_unlock(hdev);
2851 return err;
2852 }
2853
2854 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2855 u16 len)
2856 {
2857 struct mgmt_cp_block_device *cp = data;
2858 u8 status;
2859 int err;
2860
2861 BT_DBG("%s", hdev->name);
2862
2863 if (!bdaddr_type_is_valid(cp->addr.type))
2864 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2865 MGMT_STATUS_INVALID_PARAMS,
2866 &cp->addr, sizeof(cp->addr));
2867
2868 hci_dev_lock(hdev);
2869
2870 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2871 if (err < 0)
2872 status = MGMT_STATUS_FAILED;
2873 else
2874 status = MGMT_STATUS_SUCCESS;
2875
2876 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2877 &cp->addr, sizeof(cp->addr));
2878
2879 hci_dev_unlock(hdev);
2880
2881 return err;
2882 }
2883
2884 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2885 u16 len)
2886 {
2887 struct mgmt_cp_unblock_device *cp = data;
2888 u8 status;
2889 int err;
2890
2891 BT_DBG("%s", hdev->name);
2892
2893 if (!bdaddr_type_is_valid(cp->addr.type))
2894 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2895 MGMT_STATUS_INVALID_PARAMS,
2896 &cp->addr, sizeof(cp->addr));
2897
2898 hci_dev_lock(hdev);
2899
2900 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2901 if (err < 0)
2902 status = MGMT_STATUS_INVALID_PARAMS;
2903 else
2904 status = MGMT_STATUS_SUCCESS;
2905
2906 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2907 &cp->addr, sizeof(cp->addr));
2908
2909 hci_dev_unlock(hdev);
2910
2911 return err;
2912 }
2913
2914 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2915 u16 len)
2916 {
2917 struct mgmt_cp_set_device_id *cp = data;
2918 struct hci_request req;
2919 int err;
2920 __u16 source;
2921
2922 BT_DBG("%s", hdev->name);
2923
2924 source = __le16_to_cpu(cp->source);
2925
2926 if (source > 0x0002)
2927 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2928 MGMT_STATUS_INVALID_PARAMS);
2929
2930 hci_dev_lock(hdev);
2931
2932 hdev->devid_source = source;
2933 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2934 hdev->devid_product = __le16_to_cpu(cp->product);
2935 hdev->devid_version = __le16_to_cpu(cp->version);
2936
2937 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2938
2939 hci_req_init(&req, hdev);
2940 update_eir(&req);
2941 hci_req_run(&req, NULL);
2942
2943 hci_dev_unlock(hdev);
2944
2945 return err;
2946 }
2947
2948 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2949 {
2950 struct pending_cmd *cmd;
2951
2952 BT_DBG("status 0x%02x", status);
2953
2954 hci_dev_lock(hdev);
2955
2956 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2957 if (!cmd)
2958 goto unlock;
2959
2960 if (status) {
2961 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2962 mgmt_status(status));
2963 } else {
2964 struct mgmt_mode *cp = cmd->param;
2965
2966 if (cp->val)
2967 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2968 else
2969 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2970
2971 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2972 new_settings(hdev, cmd->sk);
2973 }
2974
2975 mgmt_pending_remove(cmd);
2976
2977 unlock:
2978 hci_dev_unlock(hdev);
2979 }
2980
2981 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2982 void *data, u16 len)
2983 {
2984 struct mgmt_mode *cp = data;
2985 struct pending_cmd *cmd;
2986 struct hci_request req;
2987 int err;
2988
2989 BT_DBG("%s", hdev->name);
2990
2991 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
2992 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2993 MGMT_STATUS_NOT_SUPPORTED);
2994
2995 if (cp->val != 0x00 && cp->val != 0x01)
2996 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2997 MGMT_STATUS_INVALID_PARAMS);
2998
2999 if (!hdev_is_powered(hdev))
3000 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3001 MGMT_STATUS_NOT_POWERED);
3002
3003 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3004 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3005 MGMT_STATUS_REJECTED);
3006
3007 hci_dev_lock(hdev);
3008
3009 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3010 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3011 MGMT_STATUS_BUSY);
3012 goto unlock;
3013 }
3014
3015 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3016 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3017 hdev);
3018 goto unlock;
3019 }
3020
3021 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3022 data, len);
3023 if (!cmd) {
3024 err = -ENOMEM;
3025 goto unlock;
3026 }
3027
3028 hci_req_init(&req, hdev);
3029
3030 write_fast_connectable(&req, cp->val);
3031
3032 err = hci_req_run(&req, fast_connectable_complete);
3033 if (err < 0) {
3034 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3035 MGMT_STATUS_FAILED);
3036 mgmt_pending_remove(cmd);
3037 }
3038
3039 unlock:
3040 hci_dev_unlock(hdev);
3041
3042 return err;
3043 }
3044
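/* Sanity check a single long term key entry supplied by user space. */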
3045 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3046 {
3047 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3048 return false;
3049 if (key->master != 0x00 && key->master != 0x01)
3050 return false;
3051 if (!bdaddr_type_is_le(key->addr.type))
3052 return false;
3053 return true;
3054 }
3055
3056 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3057 void *cp_data, u16 len)
3058 {
3059 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3060 u16 key_count, expected_len;
3061 int i, err;
3062
3063 key_count = __le16_to_cpu(cp->key_count);
3064
3065 expected_len = sizeof(*cp) + key_count *
3066 sizeof(struct mgmt_ltk_info);
3067 if (expected_len != len) {
3068 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3069 expected_len, len);
3070 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3071 MGMT_STATUS_INVALID_PARAMS);
3072 }
3073
3074 BT_DBG("%s key_count %u", hdev->name, key_count);
3075
3076 for (i = 0; i < key_count; i++) {
3077 struct mgmt_ltk_info *key = &cp->keys[i];
3078
3079 if (!ltk_is_valid(key))
3080 return cmd_status(sk, hdev->id,
3081 MGMT_OP_LOAD_LONG_TERM_KEYS,
3082 MGMT_STATUS_INVALID_PARAMS);
3083 }
3084
3085 hci_dev_lock(hdev);
3086
3087 hci_smp_ltks_clear(hdev);
3088
3089 for (i = 0; i < key_count; i++) {
3090 struct mgmt_ltk_info *key = &cp->keys[i];
3091 u8 type;
3092
3093 if (key->master)
3094 type = HCI_SMP_LTK;
3095 else
3096 type = HCI_SMP_LTK_SLAVE;
3097
3098 hci_add_ltk(hdev, &key->addr.bdaddr,
3099 bdaddr_to_le(key->addr.type),
3100 type, 0, key->authenticated, key->val,
3101 key->enc_size, key->ediv, key->rand);
3102 }
3103
3104 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3105 NULL, 0);
3106
3107 hci_dev_unlock(hdev);
3108
3109 return err;
3110 }
3111
3112 static const struct mgmt_handler {
3113 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3114 u16 data_len);
3115 bool var_len;
3116 size_t data_len;
3117 } mgmt_handlers[] = {
3118 { NULL }, /* 0x0000 (no command) */
3119 { read_version, false, MGMT_READ_VERSION_SIZE },
3120 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3121 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3122 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3123 { set_powered, false, MGMT_SETTING_SIZE },
3124 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3125 { set_connectable, false, MGMT_SETTING_SIZE },
3126 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3127 { set_pairable, false, MGMT_SETTING_SIZE },
3128 { set_link_security, false, MGMT_SETTING_SIZE },
3129 { set_ssp, false, MGMT_SETTING_SIZE },
3130 { set_hs, false, MGMT_SETTING_SIZE },
3131 { set_le, false, MGMT_SETTING_SIZE },
3132 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3133 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3134 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3135 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3136 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3137 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3138 { disconnect, false, MGMT_DISCONNECT_SIZE },
3139 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3140 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3141 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3142 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3143 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3144 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3145 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3146 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3147 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3148 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3149 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3150 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3151 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3152 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3153 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3154 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3155 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3156 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3157 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3158 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
3159 };
3160
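/* Entry point for management commands received on an HCI control
 * socket: validate the header, resolve the target controller and
 * dispatch to the matching mgmt_handlers[] entry.
 */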
3162 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3163 {
3164 void *buf;
3165 u8 *cp;
3166 struct mgmt_hdr *hdr;
3167 u16 opcode, index, len;
3168 struct hci_dev *hdev = NULL;
3169 const struct mgmt_handler *handler;
3170 int err;
3171
3172 BT_DBG("got %zu bytes", msglen);
3173
3174 if (msglen < sizeof(*hdr))
3175 return -EINVAL;
3176
3177 buf = kmalloc(msglen, GFP_KERNEL);
3178 if (!buf)
3179 return -ENOMEM;
3180
3181 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3182 err = -EFAULT;
3183 goto done;
3184 }
3185
3186 hdr = buf;
3187 opcode = __le16_to_cpu(hdr->opcode);
3188 index = __le16_to_cpu(hdr->index);
3189 len = __le16_to_cpu(hdr->len);
3190
3191 if (len != msglen - sizeof(*hdr)) {
3192 err = -EINVAL;
3193 goto done;
3194 }
3195
3196 if (index != MGMT_INDEX_NONE) {
3197 hdev = hci_dev_get(index);
3198 if (!hdev) {
3199 err = cmd_status(sk, index, opcode,
3200 MGMT_STATUS_INVALID_INDEX);
3201 goto done;
3202 }
3203 }
3204
3205 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3206 mgmt_handlers[opcode].func == NULL) {
3207 BT_DBG("Unknown op %u", opcode);
3208 err = cmd_status(sk, index, opcode,
3209 MGMT_STATUS_UNKNOWN_COMMAND);
3210 goto done;
3211 }
3212
3213 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3214 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3215 err = cmd_status(sk, index, opcode,
3216 MGMT_STATUS_INVALID_INDEX);
3217 goto done;
3218 }
3219
3220 handler = &mgmt_handlers[opcode];
3221
3222 if ((handler->var_len && len < handler->data_len) ||
3223 (!handler->var_len && len != handler->data_len)) {
3224 err = cmd_status(sk, index, opcode,
3225 MGMT_STATUS_INVALID_PARAMS);
3226 goto done;
3227 }
3228
3229 if (hdev)
3230 mgmt_init_hdev(sk, hdev);
3231
3232 cp = buf + sizeof(*hdr);
3233
3234 err = handler->func(sk, hdev, cp, len);
3235 if (err < 0)
3236 goto done;
3237
3238 err = msglen;
3239
3240 done:
3241 if (hdev)
3242 hci_dev_put(hdev);
3243
3244 kfree(buf);
3245 return err;
3246 }
3247
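/* mgmt_pending_foreach() callback: fail the pending command with the
 * given status and remove it.
 */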
3248 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3249 {
3250 u8 *status = data;
3251
3252 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3253 mgmt_pending_remove(cmd);
3254 }
3255
3256 int mgmt_index_added(struct hci_dev *hdev)
3257 {
3258 if (!mgmt_valid_hdev(hdev))
3259 return -ENOTSUPP;
3260
3261 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
3262 }
3263
3264 int mgmt_index_removed(struct hci_dev *hdev)
3265 {
3266 u8 status = MGMT_STATUS_INVALID_INDEX;
3267
3268 if (!mgmt_valid_hdev(hdev))
3269 return -ENOTSUPP;
3270
3271 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3272
3273 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3274 }
3275
3276 struct cmd_lookup {
3277 struct sock *sk;
3278 struct hci_dev *hdev;
3279 u8 mgmt_status;
3280 };
3281
3282 static void settings_rsp(struct pending_cmd *cmd, void *data)
3283 {
3284 struct cmd_lookup *match = data;
3285
3286 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3287
3288 list_del(&cmd->list);
3289
3290 if (match->sk == NULL) {
3291 match->sk = cmd->sk;
3292 sock_hold(match->sk);
3293 }
3294
3295 mgmt_pending_free(cmd);
3296 }
3297
3298 static void set_bredr_scan(struct hci_request *req)
3299 {
3300 struct hci_dev *hdev = req->hdev;
3301 u8 scan = 0;
3302
3303 /* Ensure that fast connectable is disabled. This function will
3304 * not do anything if the page scan parameters are already what
3305 * they should be.
3306 */
3307 write_fast_connectable(req, false);
3308
3309 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3310 scan |= SCAN_PAGE;
3311 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3312 scan |= SCAN_INQUIRY;
3313
3314 if (scan)
3315 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3316 }
3317
3318 static void powered_complete(struct hci_dev *hdev, u8 status)
3319 {
3320 struct cmd_lookup match = { NULL, hdev };
3321
3322 BT_DBG("status 0x%02x", status);
3323
3324 hci_dev_lock(hdev);
3325
3326 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3327
3328 new_settings(hdev, match.sk);
3329
3330 hci_dev_unlock(hdev);
3331
3332 if (match.sk)
3333 sock_put(match.sk);
3334 }
3335
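/* Build and run the HCI request that brings the controller's SSP, LE,
 * authentication, scan, class, name and EIR state in line with the
 * mgmt settings after power on.
 */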
3336 static int powered_update_hci(struct hci_dev *hdev)
3337 {
3338 struct hci_request req;
3339 u8 link_sec;
3340
3341 hci_req_init(&req, hdev);
3342
3343 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3344 !lmp_host_ssp_capable(hdev)) {
3345 u8 ssp = 1;
3346
3347 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3348 }
3349
3350 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3351 struct hci_cp_write_le_host_supported cp;
3352
3353 cp.le = 1;
3354 cp.simul = lmp_le_br_capable(hdev);
3355
3356 /* Check first if we already have the right
3357 * host state (host features set)
3358 */
3359 if (cp.le != lmp_host_le_capable(hdev) ||
3360 cp.simul != lmp_host_le_br_capable(hdev))
3361 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3362 sizeof(cp), &cp);
3363 }
3364
3365 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3366 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3367 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3368 sizeof(link_sec), &link_sec);
3369
3370 if (lmp_bredr_capable(hdev)) {
3371 set_bredr_scan(&req);
3372 update_class(&req);
3373 update_name(&req);
3374 update_eir(&req);
3375 }
3376
3377 return hci_req_run(&req, powered_complete);
3378 }
3379
3380 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3381 {
3382 struct cmd_lookup match = { NULL, hdev };
3383 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3384 u8 zero_cod[] = { 0, 0, 0 };
3385 int err;
3386
3387 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3388 return 0;
3389
3390 if (powered) {
3391 if (powered_update_hci(hdev) == 0)
3392 return 0;
3393
3394 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3395 &match);
3396 goto new_settings;
3397 }
3398
3399 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3400 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3401
3402 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3403 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3404 zero_cod, sizeof(zero_cod), NULL);
3405
3406 new_settings:
3407 err = new_settings(hdev, match.sk);
3408
3409 if (match.sk)
3410 sock_put(match.sk);
3411
3412 return err;
3413 }
3414
3415 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3416 {
3417 struct cmd_lookup match = { NULL, hdev };
3418 bool changed = false;
3419 int err = 0;
3420
3421 if (discoverable) {
3422 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3423 changed = true;
3424 } else {
3425 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3426 changed = true;
3427 }
3428
3429 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3430 &match);
3431
3432 if (changed)
3433 err = new_settings(hdev, match.sk);
3434
3435 if (match.sk)
3436 sock_put(match.sk);
3437
3438 return err;
3439 }
3440
3441 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3442 {
3443 struct pending_cmd *cmd;
3444 bool changed = false;
3445 int err = 0;
3446
3447 if (connectable) {
3448 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3449 changed = true;
3450 } else {
3451 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3452 changed = true;
3453 }
3454
3455 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3456
3457 if (changed)
3458 err = new_settings(hdev, cmd ? cmd->sk : NULL);
3459
3460 return err;
3461 }
3462
3463 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3464 {
3465 u8 mgmt_err = mgmt_status(status);
3466
3467 if (scan & SCAN_PAGE)
3468 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3469 cmd_status_rsp, &mgmt_err);
3470
3471 if (scan & SCAN_INQUIRY)
3472 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3473 cmd_status_rsp, &mgmt_err);
3474
3475 return 0;
3476 }
3477
3478 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3479 bool persistent)
3480 {
3481 struct mgmt_ev_new_link_key ev;
3482
3483 memset(&ev, 0, sizeof(ev));
3484
3485 ev.store_hint = persistent;
3486 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3487 ev.key.addr.type = BDADDR_BREDR;
3488 ev.key.type = key->type;
3489 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3490 ev.key.pin_len = key->pin_len;
3491
3492 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3493 }
3494
3495 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3496 {
3497 struct mgmt_ev_new_long_term_key ev;
3498
3499 memset(&ev, 0, sizeof(ev));
3500
3501 ev.store_hint = persistent;
3502 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3503 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3504 ev.key.authenticated = key->authenticated;
3505 ev.key.enc_size = key->enc_size;
3506 ev.key.ediv = key->ediv;
3507
3508 if (key->type == HCI_SMP_LTK)
3509 ev.key.master = 1;
3510
3511 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3512 memcpy(ev.key.val, key->val, sizeof(key->val));
3513
3514 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3515 NULL);
3516 }
3517
3518 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3519 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3520 u8 *dev_class)
3521 {
3522 char buf[512];
3523 struct mgmt_ev_device_connected *ev = (void *) buf;
3524 u16 eir_len = 0;
3525
3526 bacpy(&ev->addr.bdaddr, bdaddr);
3527 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3528
3529 ev->flags = __cpu_to_le32(flags);
3530
3531 if (name_len > 0)
3532 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3533 name, name_len);
3534
3535 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3536 eir_len = eir_append_data(ev->eir, eir_len,
3537 EIR_CLASS_OF_DEV, dev_class, 3);
3538
3539 ev->eir_len = cpu_to_le16(eir_len);
3540
3541 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3542 sizeof(*ev) + eir_len, NULL);
3543 }
3544
3545 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3546 {
3547 struct mgmt_cp_disconnect *cp = cmd->param;
3548 struct sock **sk = data;
3549 struct mgmt_rp_disconnect rp;
3550
3551 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3552 rp.addr.type = cp->addr.type;
3553
3554 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3555 sizeof(rp));
3556
3557 *sk = cmd->sk;
3558 sock_hold(*sk);
3559
3560 mgmt_pending_remove(cmd);
3561 }
3562
3563 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3564 {
3565 struct hci_dev *hdev = data;
3566 struct mgmt_cp_unpair_device *cp = cmd->param;
3567 struct mgmt_rp_unpair_device rp;
3568
3569 memset(&rp, 0, sizeof(rp));
3570 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3571 rp.addr.type = cp->addr.type;
3572
3573 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3574
3575 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3576
3577 mgmt_pending_remove(cmd);
3578 }
3579
3580 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3581 u8 link_type, u8 addr_type, u8 reason)
3582 {
3583 struct mgmt_ev_device_disconnected ev;
3584 struct sock *sk = NULL;
3585 int err;
3586
3587 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3588
3589 bacpy(&ev.addr.bdaddr, bdaddr);
3590 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3591 ev.reason = reason;
3592
3593 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3594 sk);
3595
3596 if (sk)
3597 sock_put(sk);
3598
3599 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3600 hdev);
3601
3602 return err;
3603 }
3604
3605 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3606 u8 link_type, u8 addr_type, u8 status)
3607 {
3608 struct mgmt_rp_disconnect rp;
3609 struct pending_cmd *cmd;
3610 int err;
3611
3612 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3613 hdev);
3614
3615 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3616 if (!cmd)
3617 return -ENOENT;
3618
3619 bacpy(&rp.addr.bdaddr, bdaddr);
3620 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3621
3622 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3623 mgmt_status(status), &rp, sizeof(rp));
3624
3625 mgmt_pending_remove(cmd);
3626
3627 return err;
3628 }
3629
3630 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3631 u8 addr_type, u8 status)
3632 {
3633 struct mgmt_ev_connect_failed ev;
3634
3635 bacpy(&ev.addr.bdaddr, bdaddr);
3636 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3637 ev.status = mgmt_status(status);
3638
3639 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3640 }
3641
3642 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3643 {
3644 struct mgmt_ev_pin_code_request ev;
3645
3646 bacpy(&ev.addr.bdaddr, bdaddr);
3647 ev.addr.type = BDADDR_BREDR;
3648 ev.secure = secure;
3649
3650 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3651 NULL);
3652 }
3653
3654 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3655 u8 status)
3656 {
3657 struct pending_cmd *cmd;
3658 struct mgmt_rp_pin_code_reply rp;
3659 int err;
3660
3661 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3662 if (!cmd)
3663 return -ENOENT;
3664
3665 bacpy(&rp.addr.bdaddr, bdaddr);
3666 rp.addr.type = BDADDR_BREDR;
3667
3668 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3669 mgmt_status(status), &rp, sizeof(rp));
3670
3671 mgmt_pending_remove(cmd);
3672
3673 return err;
3674 }
3675
3676 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3677 u8 status)
3678 {
3679 struct pending_cmd *cmd;
3680 struct mgmt_rp_pin_code_reply rp;
3681 int err;
3682
3683 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3684 if (!cmd)
3685 return -ENOENT;
3686
3687 bacpy(&rp.addr.bdaddr, bdaddr);
3688 rp.addr.type = BDADDR_BREDR;
3689
3690 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3691 mgmt_status(status), &rp, sizeof(rp));
3692
3693 mgmt_pending_remove(cmd);
3694
3695 return err;
3696 }
3697
3698 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3699 u8 link_type, u8 addr_type, __le32 value,
3700 u8 confirm_hint)
3701 {
3702 struct mgmt_ev_user_confirm_request ev;
3703
3704 BT_DBG("%s", hdev->name);
3705
3706 bacpy(&ev.addr.bdaddr, bdaddr);
3707 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3708 ev.confirm_hint = confirm_hint;
3709 ev.value = value;
3710
3711 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3712 NULL);
3713 }
3714
3715 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3716 u8 link_type, u8 addr_type)
3717 {
3718 struct mgmt_ev_user_passkey_request ev;
3719
3720 BT_DBG("%s", hdev->name);
3721
3722 bacpy(&ev.addr.bdaddr, bdaddr);
3723 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3724
3725 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3726 NULL);
3727 }
3728
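/* Complete a pending user confirm/passkey reply command once the
 * controller has reported the result of the corresponding HCI command.
 */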
3729 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3730 u8 link_type, u8 addr_type, u8 status,
3731 u8 opcode)
3732 {
3733 struct pending_cmd *cmd;
3734 struct mgmt_rp_user_confirm_reply rp;
3735 int err;
3736
3737 cmd = mgmt_pending_find(opcode, hdev);
3738 if (!cmd)
3739 return -ENOENT;
3740
3741 bacpy(&rp.addr.bdaddr, bdaddr);
3742 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3743 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3744 &rp, sizeof(rp));
3745
3746 mgmt_pending_remove(cmd);
3747
3748 return err;
3749 }
3750
3751 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3752 u8 link_type, u8 addr_type, u8 status)
3753 {
3754 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3755 status, MGMT_OP_USER_CONFIRM_REPLY);
3756 }
3757
3758 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3759 u8 link_type, u8 addr_type, u8 status)
3760 {
3761 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3762 status,
3763 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3764 }
3765
3766 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3767 u8 link_type, u8 addr_type, u8 status)
3768 {
3769 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3770 status, MGMT_OP_USER_PASSKEY_REPLY);
3771 }
3772
3773 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3774 u8 link_type, u8 addr_type, u8 status)
3775 {
3776 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3777 status,
3778 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3779 }
3780
3781 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3782 u8 link_type, u8 addr_type, u32 passkey,
3783 u8 entered)
3784 {
3785 struct mgmt_ev_passkey_notify ev;
3786
3787 BT_DBG("%s", hdev->name);
3788
3789 bacpy(&ev.addr.bdaddr, bdaddr);
3790 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3791 ev.passkey = __cpu_to_le32(passkey);
3792 ev.entered = entered;
3793
3794 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
3795 }
3796
3797 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3798 u8 addr_type, u8 status)
3799 {
3800 struct mgmt_ev_auth_failed ev;
3801
3802 bacpy(&ev.addr.bdaddr, bdaddr);
3803 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3804 ev.status = mgmt_status(status);
3805
3806 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
3807 }
3808
3809 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3810 {
3811 struct cmd_lookup match = { NULL, hdev };
3812 bool changed = false;
3813 int err = 0;
3814
3815 if (status) {
3816 u8 mgmt_err = mgmt_status(status);
3817 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3818 cmd_status_rsp, &mgmt_err);
3819 return 0;
3820 }
3821
3822 if (test_bit(HCI_AUTH, &hdev->flags)) {
3823 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3824 changed = true;
3825 } else {
3826 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3827 changed = true;
3828 }
3829
3830 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3831 &match);
3832
3833 if (changed)
3834 err = new_settings(hdev, match.sk);
3835
3836 if (match.sk)
3837 sock_put(match.sk);
3838
3839 return err;
3840 }
3841
3842 static void clear_eir(struct hci_request *req)
3843 {
3844 struct hci_dev *hdev = req->hdev;
3845 struct hci_cp_write_eir cp;
3846
3847 if (!lmp_ext_inq_capable(hdev))
3848 return;
3849
3850 memset(hdev->eir, 0, sizeof(hdev->eir));
3851
3852 memset(&cp, 0, sizeof(cp));
3853
3854 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
3855 }
3856
3857 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3858 {
3859 struct cmd_lookup match = { NULL, hdev };
3860 struct hci_request req;
3861 bool changed = false;
3862 int err = 0;
3863
3864 if (status) {
3865 u8 mgmt_err = mgmt_status(status);
3866
3867 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3868 &hdev->dev_flags))
3869 err = new_settings(hdev, NULL);
3870
3871 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3872 &mgmt_err);
3873
3874 return err;
3875 }
3876
3877 if (enable) {
3878 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3879 changed = true;
3880 } else {
3881 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3882 changed = true;
3883 }
3884
3885 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3886
3887 if (changed)
3888 err = new_settings(hdev, match.sk);
3889
3890 if (match.sk)
3891 sock_put(match.sk);
3892
3893 hci_req_init(&req, hdev);
3894
3895 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3896 update_eir(&req);
3897 else
3898 clear_eir(&req);
3899
3900 hci_req_run(&req, NULL);
3901
3902 return err;
3903 }
3904
3905 static void sk_lookup(struct pending_cmd *cmd, void *data)
3906 {
3907 struct cmd_lookup *match = data;
3908
3909 if (match->sk == NULL) {
3910 match->sk = cmd->sk;
3911 sock_hold(match->sk);
3912 }
3913 }
3914
3915 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3916 u8 status)
3917 {
3918 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3919 int err = 0;
3920
3921 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3922 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3923 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3924
3925 if (!status)
3926 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
3927 3, NULL);
3928
3929 if (match.sk)
3930 sock_put(match.sk);
3931
3932 return err;
3933 }
3934
3935 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3936 {
3937 struct mgmt_cp_set_local_name ev;
3938 struct pending_cmd *cmd;
3939
3940 if (status)
3941 return 0;
3942
3943 memset(&ev, 0, sizeof(ev));
3944 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3945 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3946
3947 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3948 if (!cmd) {
3949 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3950
3951 /* If this is an HCI command related to powering on the
3952 * HCI dev don't send any mgmt signals.
3953 */
3954 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3955 return 0;
3956 }
3957
3958 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3959 cmd ? cmd->sk : NULL);
3960 }
3961
3962 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3963 u8 *randomizer, u8 status)
3964 {
3965 struct pending_cmd *cmd;
3966 int err;
3967
3968 BT_DBG("%s status %u", hdev->name, status);
3969
3970 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3971 if (!cmd)
3972 return -ENOENT;
3973
3974 if (status) {
3975 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3976 mgmt_status(status));
3977 } else {
3978 struct mgmt_rp_read_local_oob_data rp;
3979
3980 memcpy(rp.hash, hash, sizeof(rp.hash));
3981 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3982
3983 err = cmd_complete(cmd->sk, hdev->id,
3984 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3985 sizeof(rp));
3986 }
3987
3988 mgmt_pending_remove(cmd);
3989
3990 return err;
3991 }
3992
3993 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3994 {
3995 struct cmd_lookup match = { NULL, hdev };
3996 bool changed = false;
3997 int err = 0;
3998
3999 if (status) {
4000 u8 mgmt_err = mgmt_status(status);
4001
4002 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
4003 &hdev->dev_flags))
4004 err = new_settings(hdev, NULL);
4005
4006 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
4007 &mgmt_err);
4008
4009 return err;
4010 }
4011
4012 if (enable) {
4013 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4014 changed = true;
4015 } else {
4016 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4017 changed = true;
4018 }
4019
4020 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
4021
4022 if (changed)
4023 err = new_settings(hdev, match.sk);
4024
4025 if (match.sk)
4026 sock_put(match.sk);
4027
4028 return err;
4029 }
4030
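/* Emit a Device Found event, appending the class of device to the EIR
 * data when the remote device did not provide one itself.
 */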
4031 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4032 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
4033 u8 ssp, u8 *eir, u16 eir_len)
4034 {
4035 char buf[512];
4036 struct mgmt_ev_device_found *ev = (void *) buf;
4037 size_t ev_size;
4038
4039 /* Leave 5 bytes for a potential CoD field */
4040 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4041 return -EINVAL;
4042
4043 memset(buf, 0, sizeof(buf));
4044
4045 bacpy(&ev->addr.bdaddr, bdaddr);
4046 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4047 ev->rssi = rssi;
4048 if (cfm_name)
4049 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4050 if (!ssp)
4051 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4052
4053 if (eir_len > 0)
4054 memcpy(ev->eir, eir, eir_len);
4055
4056 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4057 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4058 dev_class, 3);
4059
4060 ev->eir_len = cpu_to_le16(eir_len);
4061 ev_size = sizeof(*ev) + eir_len;
4062
4063 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4064 }
4065
4066 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4067 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4068 {
4069 struct mgmt_ev_device_found *ev;
4070 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4071 u16 eir_len;
4072
4073 ev = (struct mgmt_ev_device_found *) buf;
4074
4075 memset(buf, 0, sizeof(buf));
4076
4077 bacpy(&ev->addr.bdaddr, bdaddr);
4078 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4079 ev->rssi = rssi;
4080
4081 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4082 name_len);
4083
4084 ev->eir_len = cpu_to_le16(eir_len);
4085
4086 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4087 sizeof(*ev) + eir_len, NULL);
4088 }
4089
4090 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
4091 {
4092 struct pending_cmd *cmd;
4093 u8 type;
4094 int err;
4095
4096 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4097
4098 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4099 if (!cmd)
4100 return -ENOENT;
4101
4102 type = hdev->discovery.type;
4103
4104 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4105 &type, sizeof(type));
4106 mgmt_pending_remove(cmd);
4107
4108 return err;
4109 }
4110
4111 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4112 {
4113 struct pending_cmd *cmd;
4114 int err;
4115
4116 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4117 if (!cmd)
4118 return -ENOENT;
4119
4120 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4121 &hdev->discovery.type, sizeof(hdev->discovery.type));
4122 mgmt_pending_remove(cmd);
4123
4124 return err;
4125 }
4126
4127 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4128 {
4129 struct mgmt_ev_discovering ev;
4130 struct pending_cmd *cmd;
4131
4132 BT_DBG("%s discovering %u", hdev->name, discovering);
4133
4134 if (discovering)
4135 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4136 else
4137 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4138
4139 if (cmd != NULL) {
4140 u8 type = hdev->discovery.type;
4141
4142 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4143 sizeof(type));
4144 mgmt_pending_remove(cmd);
4145 }
4146
4147 memset(&ev, 0, sizeof(ev));
4148 ev.type = hdev->discovery.type;
4149 ev.discovering = discovering;
4150
4151 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4152 }
4153
4154 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4155 {
4156 struct pending_cmd *cmd;
4157 struct mgmt_ev_device_blocked ev;
4158
4159 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4160
4161 bacpy(&ev.addr.bdaddr, bdaddr);
4162 ev.addr.type = type;
4163
4164 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4165 cmd ? cmd->sk : NULL);
4166 }
4167
4168 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4169 {
4170 struct pending_cmd *cmd;
4171 struct mgmt_ev_device_unblocked ev;
4172
4173 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4174
4175 bacpy(&ev.addr.bdaddr, bdaddr);
4176 ev.addr.type = type;
4177
4178 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4179 cmd ? cmd->sk : NULL);
4180 }
4181
4182 module_param(enable_hs, bool, 0644);
4183 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");