Bluetooth: mgmt: Fix updating local name when powering on
net/bluetooth/hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <net/sock.h>
39
40 #include <asm/system.h>
41 #include <linux/uaccess.h>
42 #include <asm/unaligned.h>
43
44 #include <net/bluetooth/bluetooth.h>
45 #include <net/bluetooth/hci_core.h>
46
47 /* Handle HCI Event packets */
48
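/*
 * Convention for the handlers below: each hci_cc_*() function handles the
 * Command Complete event for a single opcode.  Commands that return only a
 * status put it in the first byte of skb->data (0 means success); commands
 * with a reply structure start that structure with a status field.  The
 * hci_cs_*() functions further down handle the corresponding Command Status
 * events for commands that complete asynchronously.
 */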
49 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
50 {
51 __u8 status = *((__u8 *) skb->data);
52
53 BT_DBG("%s status 0x%x", hdev->name, status);
54
55 if (status) {
56 hci_dev_lock(hdev);
57 mgmt_stop_discovery_failed(hdev, status);
58 hci_dev_unlock(hdev);
59 return;
60 }
61
62 clear_bit(HCI_INQUIRY, &hdev->flags);
63
64 hci_dev_lock(hdev);
65 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
66 hci_dev_unlock(hdev);
67
68 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
69
70 hci_conn_check_pending(hdev);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%x", hdev->name, status);
78
79 if (status)
80 return;
81
82 hci_conn_check_pending(hdev);
83 }
84
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
86 {
87 BT_DBG("%s", hdev->name);
88 }
89
90 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
91 {
92 struct hci_rp_role_discovery *rp = (void *) skb->data;
93 struct hci_conn *conn;
94
95 BT_DBG("%s status 0x%x", hdev->name, rp->status);
96
97 if (rp->status)
98 return;
99
100 hci_dev_lock(hdev);
101
102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
103 if (conn) {
104 if (rp->role)
105 conn->link_mode &= ~HCI_LM_MASTER;
106 else
107 conn->link_mode |= HCI_LM_MASTER;
108 }
109
110 hci_dev_unlock(hdev);
111 }
112
113 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
114 {
115 struct hci_rp_read_link_policy *rp = (void *) skb->data;
116 struct hci_conn *conn;
117
118 BT_DBG("%s status 0x%x", hdev->name, rp->status);
119
120 if (rp->status)
121 return;
122
123 hci_dev_lock(hdev);
124
125 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
126 if (conn)
127 conn->link_policy = __le16_to_cpu(rp->policy);
128
129 hci_dev_unlock(hdev);
130 }
131
132 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
133 {
134 struct hci_rp_write_link_policy *rp = (void *) skb->data;
135 struct hci_conn *conn;
136 void *sent;
137
138 BT_DBG("%s status 0x%x", hdev->name, rp->status);
139
140 if (rp->status)
141 return;
142
143 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
144 if (!sent)
145 return;
146
147 hci_dev_lock(hdev);
148
149 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
150 if (conn)
151 conn->link_policy = get_unaligned_le16(sent + 2);
152
153 hci_dev_unlock(hdev);
154 }
155
156 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
157 {
158 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
159
160 BT_DBG("%s status 0x%x", hdev->name, rp->status);
161
162 if (rp->status)
163 return;
164
165 hdev->link_policy = __le16_to_cpu(rp->policy);
166 }
167
168 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
169 {
170 __u8 status = *((__u8 *) skb->data);
171 void *sent;
172
173 BT_DBG("%s status 0x%x", hdev->name, status);
174
175 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
176 if (!sent)
177 return;
178
179 if (!status)
180 hdev->link_policy = get_unaligned_le16(sent);
181
182 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
183 }
184
185 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
186 {
187 __u8 status = *((__u8 *) skb->data);
188
189 BT_DBG("%s status 0x%x", hdev->name, status);
190
191 clear_bit(HCI_RESET, &hdev->flags);
192
193 hci_req_complete(hdev, HCI_OP_RESET, status);
194
195 /* Reset all non-persistent flags */
196 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
197
198 hdev->discovery.state = DISCOVERY_STOPPED;
199 }
200
201 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
202 {
203 __u8 status = *((__u8 *) skb->data);
204 void *sent;
205
206 BT_DBG("%s status 0x%x", hdev->name, status);
207
208 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
209 if (!sent)
210 return;
211
212 hci_dev_lock(hdev);
213
214 if (test_bit(HCI_MGMT, &hdev->dev_flags))
215 mgmt_set_local_name_complete(hdev, sent, status);
216 else if (!status)
217 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
218
219 hci_dev_unlock(hdev);
220
221 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
222 }
223
224 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
225 {
226 struct hci_rp_read_local_name *rp = (void *) skb->data;
227
228 BT_DBG("%s status 0x%x", hdev->name, rp->status);
229
230 if (rp->status)
231 return;
232
233 if (test_bit(HCI_SETUP, &hdev->dev_flags))
234 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
235 }
236
237 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
238 {
239 __u8 status = *((__u8 *) skb->data);
240 void *sent;
241
242 BT_DBG("%s status 0x%x", hdev->name, status);
243
244 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
245 if (!sent)
246 return;
247
248 if (!status) {
249 __u8 param = *((__u8 *) sent);
250
251 if (param == AUTH_ENABLED)
252 set_bit(HCI_AUTH, &hdev->flags);
253 else
254 clear_bit(HCI_AUTH, &hdev->flags);
255 }
256
257 if (test_bit(HCI_MGMT, &hdev->dev_flags))
258 mgmt_auth_enable_complete(hdev, status);
259
260 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
261 }
262
263 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 {
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
267
268 BT_DBG("%s status 0x%x", hdev->name, status);
269
270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 if (!sent)
272 return;
273
274 if (!status) {
275 __u8 param = *((__u8 *) sent);
276
277 if (param)
278 set_bit(HCI_ENCRYPT, &hdev->flags);
279 else
280 clear_bit(HCI_ENCRYPT, &hdev->flags);
281 }
282
283 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
284 }
285
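/*
 * Scan enable is a bitmask: SCAN_INQUIRY turns on inquiry scan
 * (discoverable), SCAN_PAGE turns on page scan (connectable).  The handler
 * mirrors the bits into HCI_ISCAN/HCI_PSCAN and only notifies the
 * management interface when the state actually changed.
 */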
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287 {
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
290 void *sent;
291
292 BT_DBG("%s status 0x%x", hdev->name, status);
293
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
295 if (!sent)
296 return;
297
298 param = *((__u8 *) sent);
299
300 hci_dev_lock(hdev);
301
302 if (status != 0) {
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
305 goto done;
306 }
307
308 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
309 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
310
311 if (param & SCAN_INQUIRY) {
312 set_bit(HCI_ISCAN, &hdev->flags);
313 if (!old_iscan)
314 mgmt_discoverable(hdev, 1);
315 if (hdev->discov_timeout > 0) {
316 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
317 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
318 to);
319 }
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
322
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
325 if (!old_pscan)
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
329
330 done:
331 hci_dev_unlock(hdev);
332 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
333 }
334
335 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 {
337 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338
339 BT_DBG("%s status 0x%x", hdev->name, rp->status);
340
341 if (rp->status)
342 return;
343
344 memcpy(hdev->dev_class, rp->dev_class, 3);
345
346 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
347 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
348 }
349
350 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 {
352 __u8 status = *((__u8 *) skb->data);
353 void *sent;
354
355 BT_DBG("%s status 0x%x", hdev->name, status);
356
357 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
358 if (!sent)
359 return;
360
361 hci_dev_lock(hdev);
362
363 if (status == 0)
364 memcpy(hdev->dev_class, sent, 3);
365
366 if (test_bit(HCI_MGMT, &hdev->dev_flags))
367 mgmt_set_class_of_dev_complete(hdev, sent, status);
368
369 hci_dev_unlock(hdev);
370 }
371
372 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 {
374 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
375 __u16 setting;
376
377 BT_DBG("%s status 0x%x", hdev->name, rp->status);
378
379 if (rp->status)
380 return;
381
382 setting = __le16_to_cpu(rp->voice_setting);
383
384 if (hdev->voice_setting == setting)
385 return;
386
387 hdev->voice_setting = setting;
388
389 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
390
391 if (hdev->notify)
392 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
393 }
394
395 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
396 {
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
400
401 BT_DBG("%s status 0x%x", hdev->name, status);
402
403 if (status)
404 return;
405
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
409
410 setting = get_unaligned_le16(sent);
411
412 if (hdev->voice_setting == setting)
413 return;
414
415 hdev->voice_setting = setting;
416
417 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
418
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422
423 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
424 {
425 __u8 status = *((__u8 *) skb->data);
426
427 BT_DBG("%s status 0x%x", hdev->name, status);
428
429 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
430 }
431
432 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
433 {
434 __u8 status = *((__u8 *) skb->data);
435 void *sent;
436
437 BT_DBG("%s status 0x%x", hdev->name, status);
438
439 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
440 if (!sent)
441 return;
442
443 if (test_bit(HCI_MGMT, &hdev->dev_flags))
444 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
445 else if (!status) {
446 if (*((u8 *) sent))
447 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
448 else
449 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
450 }
451 }
452
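/*
 * Pick the inquiry mode to write: 2 = extended inquiry result format,
 * 1 = inquiry result with RSSI, 0 = standard.  The manufacturer/revision
 * checks below appear to be quirks for controllers that handle RSSI
 * results without advertising the corresponding feature bit.
 */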
453 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
454 {
455 if (hdev->features[6] & LMP_EXT_INQ)
456 return 2;
457
458 if (hdev->features[3] & LMP_RSSI_INQ)
459 return 1;
460
461 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
462 hdev->lmp_subver == 0x0757)
463 return 1;
464
465 if (hdev->manufacturer == 15) {
466 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
467 return 1;
468 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
469 return 1;
470 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
471 return 1;
472 }
473
474 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
475 hdev->lmp_subver == 0x1805)
476 return 1;
477
478 return 0;
479 }
480
481 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
482 {
483 u8 mode;
484
485 mode = hci_get_inquiry_mode(hdev);
486
487 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
488 }
489
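/*
 * The event mask is an 8 byte little-endian bitfield; each bit enables
 * delivery of one HCI event by the controller.  Optional events are only
 * enabled when the matching LMP feature bit is present.
 */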
490 static void hci_setup_event_mask(struct hci_dev *hdev)
491 {
492 /* The second byte is 0xff instead of 0x9f (two reserved bits
493 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
494 * command otherwise */
495 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
496
497 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
498 	 * any event mask for pre-1.2 devices */
499 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
500 return;
501
502 events[4] |= 0x01; /* Flow Specification Complete */
503 events[4] |= 0x02; /* Inquiry Result with RSSI */
504 events[4] |= 0x04; /* Read Remote Extended Features Complete */
505 events[5] |= 0x08; /* Synchronous Connection Complete */
506 events[5] |= 0x10; /* Synchronous Connection Changed */
507
508 if (hdev->features[3] & LMP_RSSI_INQ)
509 events[4] |= 0x04; /* Inquiry Result with RSSI */
510
511 if (hdev->features[5] & LMP_SNIFF_SUBR)
512 events[5] |= 0x20; /* Sniff Subrating */
513
514 if (hdev->features[5] & LMP_PAUSE_ENC)
515 events[5] |= 0x80; /* Encryption Key Refresh Complete */
516
517 if (hdev->features[6] & LMP_EXT_INQ)
518 events[5] |= 0x40; /* Extended Inquiry Result */
519
520 if (hdev->features[6] & LMP_NO_FLUSH)
521 events[7] |= 0x01; /* Enhanced Flush Complete */
522
523 if (hdev->features[7] & LMP_LSTO)
524 events[6] |= 0x80; /* Link Supervision Timeout Changed */
525
526 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
527 events[6] |= 0x01; /* IO Capability Request */
528 events[6] |= 0x02; /* IO Capability Response */
529 events[6] |= 0x04; /* User Confirmation Request */
530 events[6] |= 0x08; /* User Passkey Request */
531 events[6] |= 0x10; /* Remote OOB Data Request */
532 events[6] |= 0x20; /* Simple Pairing Complete */
533 events[7] |= 0x04; /* User Passkey Notification */
534 events[7] |= 0x08; /* Keypress Notification */
535 events[7] |= 0x10; /* Remote Host Supported
536 * Features Notification */
537 }
538
539 if (hdev->features[4] & LMP_LE)
540 events[7] |= 0x20; /* LE Meta-Event */
541
542 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
543 }
544
545 static void hci_setup(struct hci_dev *hdev)
546 {
547 if (hdev->dev_type != HCI_BREDR)
548 return;
549
550 hci_setup_event_mask(hdev);
551
552 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
553 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
554
555 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
556 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
557 u8 mode = 0x01;
558 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
559 sizeof(mode), &mode);
560 } else {
561 struct hci_cp_write_eir cp;
562
563 memset(hdev->eir, 0, sizeof(hdev->eir));
564 memset(&cp, 0, sizeof(cp));
565
566 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
567 }
568 }
569
570 if (hdev->features[3] & LMP_RSSI_INQ)
571 hci_setup_inquiry_mode(hdev);
572
573 if (hdev->features[7] & LMP_INQ_TX_PWR)
574 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
575
576 if (hdev->features[7] & LMP_EXTFEATURES) {
577 struct hci_cp_read_local_ext_features cp;
578
579 cp.page = 0x01;
580 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
581 sizeof(cp), &cp);
582 }
583
584 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
585 u8 enable = 1;
586 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE,
587 sizeof(enable), &enable);
588 }
589 }
590
591 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
592 {
593 struct hci_rp_read_local_version *rp = (void *) skb->data;
594
595 BT_DBG("%s status 0x%x", hdev->name, rp->status);
596
597 if (rp->status)
598 goto done;
599
600 hdev->hci_ver = rp->hci_ver;
601 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
602 hdev->lmp_ver = rp->lmp_ver;
603 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
604 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
605
606 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
607 hdev->manufacturer,
608 hdev->hci_ver, hdev->hci_rev);
609
610 if (test_bit(HCI_INIT, &hdev->flags))
611 hci_setup(hdev);
612
613 done:
614 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
615 }
616
617 static void hci_setup_link_policy(struct hci_dev *hdev)
618 {
619 u16 link_policy = 0;
620
621 if (hdev->features[0] & LMP_RSWITCH)
622 link_policy |= HCI_LP_RSWITCH;
623 if (hdev->features[0] & LMP_HOLD)
624 link_policy |= HCI_LP_HOLD;
625 if (hdev->features[0] & LMP_SNIFF)
626 link_policy |= HCI_LP_SNIFF;
627 if (hdev->features[1] & LMP_PARK)
628 link_policy |= HCI_LP_PARK;
629
630 link_policy = cpu_to_le16(link_policy);
631 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
632 sizeof(link_policy), &link_policy);
633 }
634
635 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
636 {
637 struct hci_rp_read_local_commands *rp = (void *) skb->data;
638
639 BT_DBG("%s status 0x%x", hdev->name, rp->status);
640
641 if (rp->status)
642 goto done;
643
644 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
645
646 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
647 hci_setup_link_policy(hdev);
648
649 done:
650 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
651 }
652
653 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
654 {
655 struct hci_rp_read_local_features *rp = (void *) skb->data;
656
657 BT_DBG("%s status 0x%x", hdev->name, rp->status);
658
659 if (rp->status)
660 return;
661
662 memcpy(hdev->features, rp->features, 8);
663
664 	/* Adjust default settings according to the features
665 	 * supported by the device. */
666
667 if (hdev->features[0] & LMP_3SLOT)
668 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
669
670 if (hdev->features[0] & LMP_5SLOT)
671 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
672
673 if (hdev->features[1] & LMP_HV2) {
674 hdev->pkt_type |= (HCI_HV2);
675 hdev->esco_type |= (ESCO_HV2);
676 }
677
678 if (hdev->features[1] & LMP_HV3) {
679 hdev->pkt_type |= (HCI_HV3);
680 hdev->esco_type |= (ESCO_HV3);
681 }
682
683 if (hdev->features[3] & LMP_ESCO)
684 hdev->esco_type |= (ESCO_EV3);
685
686 if (hdev->features[4] & LMP_EV4)
687 hdev->esco_type |= (ESCO_EV4);
688
689 if (hdev->features[4] & LMP_EV5)
690 hdev->esco_type |= (ESCO_EV5);
691
692 if (hdev->features[5] & LMP_EDR_ESCO_2M)
693 hdev->esco_type |= (ESCO_2EV3);
694
695 if (hdev->features[5] & LMP_EDR_ESCO_3M)
696 hdev->esco_type |= (ESCO_3EV3);
697
698 if (hdev->features[5] & LMP_EDR_3S_ESCO)
699 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
700
701 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
702 hdev->features[0], hdev->features[1],
703 hdev->features[2], hdev->features[3],
704 hdev->features[4], hdev->features[5],
705 hdev->features[6], hdev->features[7]);
706 }
707
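/*
 * Write LE Host Supported only when the requested value differs from the
 * cached host features, avoiding a redundant command on every init.
 */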
708 static void hci_set_le_support(struct hci_dev *hdev)
709 {
710 struct hci_cp_write_le_host_supported cp;
711
712 memset(&cp, 0, sizeof(cp));
713
714 if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
715 cp.le = 1;
716 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
717 }
718
719 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
720 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
721 sizeof(cp), &cp);
722 }
723
724 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
725 struct sk_buff *skb)
726 {
727 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
728
729 BT_DBG("%s status 0x%x", hdev->name, rp->status);
730
731 if (rp->status)
732 goto done;
733
734 switch (rp->page) {
735 case 0:
736 memcpy(hdev->features, rp->features, 8);
737 break;
738 case 1:
739 memcpy(hdev->host_features, rp->features, 8);
740 break;
741 }
742
743 if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
744 hci_set_le_support(hdev);
745
746 done:
747 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
748 }
749
750 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
751 struct sk_buff *skb)
752 {
753 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
754
755 BT_DBG("%s status 0x%x", hdev->name, rp->status);
756
757 if (rp->status)
758 return;
759
760 hdev->flow_ctl_mode = rp->mode;
761
762 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
763 }
764
765 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
766 {
767 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
768
769 BT_DBG("%s status 0x%x", hdev->name, rp->status);
770
771 if (rp->status)
772 return;
773
774 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
775 hdev->sco_mtu = rp->sco_mtu;
776 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
777 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
778
779 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
780 hdev->sco_mtu = 64;
781 hdev->sco_pkts = 8;
782 }
783
784 hdev->acl_cnt = hdev->acl_pkts;
785 hdev->sco_cnt = hdev->sco_pkts;
786
787 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
788 hdev->acl_mtu, hdev->acl_pkts,
789 hdev->sco_mtu, hdev->sco_pkts);
790 }
791
792 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
793 {
794 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
795
796 BT_DBG("%s status 0x%x", hdev->name, rp->status);
797
798 if (!rp->status)
799 bacpy(&hdev->bdaddr, &rp->bdaddr);
800
801 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
802 }
803
804 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
805 struct sk_buff *skb)
806 {
807 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
808
809 BT_DBG("%s status 0x%x", hdev->name, rp->status);
810
811 if (rp->status)
812 return;
813
814 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
815 hdev->block_len = __le16_to_cpu(rp->block_len);
816 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
817
818 hdev->block_cnt = hdev->num_blocks;
819
820 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
821 hdev->block_cnt, hdev->block_len);
822
823 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
824 }
825
826 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
827 {
828 __u8 status = *((__u8 *) skb->data);
829
830 BT_DBG("%s status 0x%x", hdev->name, status);
831
832 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
833 }
834
835 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
836 struct sk_buff *skb)
837 {
838 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
839
840 BT_DBG("%s status 0x%x", hdev->name, rp->status);
841
842 if (rp->status)
843 return;
844
845 hdev->amp_status = rp->amp_status;
846 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
847 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
848 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
849 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
850 hdev->amp_type = rp->amp_type;
851 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
852 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
853 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
854 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
855
856 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
857 }
858
859 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
860 struct sk_buff *skb)
861 {
862 __u8 status = *((__u8 *) skb->data);
863
864 BT_DBG("%s status 0x%x", hdev->name, status);
865
866 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
867 }
868
869 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
870 {
871 __u8 status = *((__u8 *) skb->data);
872
873 BT_DBG("%s status 0x%x", hdev->name, status);
874
875 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
876 }
877
878 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
879 struct sk_buff *skb)
880 {
881 __u8 status = *((__u8 *) skb->data);
882
883 BT_DBG("%s status 0x%x", hdev->name, status);
884
885 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
886 }
887
888 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
889 struct sk_buff *skb)
890 {
891 __u8 status = *((__u8 *) skb->data);
892
893 BT_DBG("%s status 0x%x", hdev->name, status);
894
895 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
896 }
897
898 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
899 {
900 __u8 status = *((__u8 *) skb->data);
901
902 BT_DBG("%s status 0x%x", hdev->name, status);
903
904 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
905 }
906
907 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
908 {
909 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
910 struct hci_cp_pin_code_reply *cp;
911 struct hci_conn *conn;
912
913 BT_DBG("%s status 0x%x", hdev->name, rp->status);
914
915 hci_dev_lock(hdev);
916
917 if (test_bit(HCI_MGMT, &hdev->dev_flags))
918 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
919
920 if (rp->status != 0)
921 goto unlock;
922
923 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
924 if (!cp)
925 goto unlock;
926
927 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
928 if (conn)
929 conn->pin_length = cp->pin_len;
930
931 unlock:
932 hci_dev_unlock(hdev);
933 }
934
935 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
936 {
937 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
938
939 BT_DBG("%s status 0x%x", hdev->name, rp->status);
940
941 hci_dev_lock(hdev);
942
943 if (test_bit(HCI_MGMT, &hdev->dev_flags))
944 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
945 rp->status);
946
947 hci_dev_unlock(hdev);
948 }
949
950 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
951 struct sk_buff *skb)
952 {
953 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
954
955 BT_DBG("%s status 0x%x", hdev->name, rp->status);
956
957 if (rp->status)
958 return;
959
960 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
961 hdev->le_pkts = rp->le_max_pkt;
962
963 hdev->le_cnt = hdev->le_pkts;
964
965 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
966
967 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
968 }
969
970 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
971 {
972 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
973
974 BT_DBG("%s status 0x%x", hdev->name, rp->status);
975
976 hci_dev_lock(hdev);
977
978 if (test_bit(HCI_MGMT, &hdev->dev_flags))
979 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
980 0, rp->status);
981
982 hci_dev_unlock(hdev);
983 }
984
985 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
986 struct sk_buff *skb)
987 {
988 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
989
990 BT_DBG("%s status 0x%x", hdev->name, rp->status);
991
992 hci_dev_lock(hdev);
993
994 if (test_bit(HCI_MGMT, &hdev->dev_flags))
995 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
996 ACL_LINK, 0,
997 rp->status);
998
999 hci_dev_unlock(hdev);
1000 }
1001
1002 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1003 {
1004 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1005
1006 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1007
1008 hci_dev_lock(hdev);
1009
1010 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1011 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1012 0, rp->status);
1013
1014 hci_dev_unlock(hdev);
1015 }
1016
1017 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1018 struct sk_buff *skb)
1019 {
1020 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1021
1022 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1023
1024 hci_dev_lock(hdev);
1025
1026 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1027 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1028 ACL_LINK, 0,
1029 rp->status);
1030
1031 hci_dev_unlock(hdev);
1032 }
1033
1034 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1035 struct sk_buff *skb)
1036 {
1037 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1038
1039 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1040
1041 hci_dev_lock(hdev);
1042 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1043 rp->randomizer, rp->status);
1044 hci_dev_unlock(hdev);
1045 }
1046
1047 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1048 {
1049 __u8 status = *((__u8 *) skb->data);
1050
1051 BT_DBG("%s status 0x%x", hdev->name, status);
1052
1053 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1054
1055 if (status) {
1056 hci_dev_lock(hdev);
1057 mgmt_start_discovery_failed(hdev, status);
1058 hci_dev_unlock(hdev);
1059 return;
1060 }
1061 }
1062
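/*
 * LE scan enable/disable completion: on enable, clear stale advertising
 * entries and move discovery to FINDING; on disable, either continue with
 * the BR/EDR phase of an interleaved discovery or mark discovery stopped.
 */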
1063 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1064 struct sk_buff *skb)
1065 {
1066 struct hci_cp_le_set_scan_enable *cp;
1067 __u8 status = *((__u8 *) skb->data);
1068
1069 BT_DBG("%s status 0x%x", hdev->name, status);
1070
1071 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1072 if (!cp)
1073 return;
1074
1075 switch (cp->enable) {
1076 case LE_SCANNING_ENABLED:
1077 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1078
1079 if (status) {
1080 hci_dev_lock(hdev);
1081 mgmt_start_discovery_failed(hdev, status);
1082 hci_dev_unlock(hdev);
1083 return;
1084 }
1085
1086 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1087
1088 cancel_delayed_work_sync(&hdev->adv_work);
1089
1090 hci_dev_lock(hdev);
1091 hci_adv_entries_clear(hdev);
1092 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1093 hci_dev_unlock(hdev);
1094 break;
1095
1096 case LE_SCANNING_DISABLED:
1097 if (status)
1098 return;
1099
1100 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1101
1102 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1103
1104 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1105 mgmt_interleaved_discovery(hdev);
1106 } else {
1107 hci_dev_lock(hdev);
1108 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1109 hci_dev_unlock(hdev);
1110 }
1111
1112 break;
1113
1114 default:
1115 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1116 break;
1117 }
1118 }
1119
1120 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1121 {
1122 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1123
1124 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1125
1126 if (rp->status)
1127 return;
1128
1129 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1130 }
1131
1132 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1133 {
1134 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1135
1136 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1137
1138 if (rp->status)
1139 return;
1140
1141 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1142 }
1143
1144 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1145 struct sk_buff *skb)
1146 {
1147 struct hci_cp_write_le_host_supported *sent;
1148 __u8 status = *((__u8 *) skb->data);
1149
1150 BT_DBG("%s status 0x%x", hdev->name, status);
1151
1152 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1153 if (!sent)
1154 return;
1155
1156 if (!status) {
1157 if (sent->le)
1158 hdev->host_features[0] |= LMP_HOST_LE;
1159 else
1160 hdev->host_features[0] &= ~LMP_HOST_LE;
1161 }
1162
1163 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1164 !test_bit(HCI_INIT, &hdev->flags))
1165 mgmt_le_enable_complete(hdev, sent->le, status);
1166
1167 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1168 }
1169
1170 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1171 {
1172 BT_DBG("%s status 0x%x", hdev->name, status);
1173
1174 if (status) {
1175 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1176 hci_conn_check_pending(hdev);
1177 hci_dev_lock(hdev);
1178 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1179 mgmt_start_discovery_failed(hdev, status);
1180 hci_dev_unlock(hdev);
1181 return;
1182 }
1183
1184 set_bit(HCI_INQUIRY, &hdev->flags);
1185
1186 hci_dev_lock(hdev);
1187 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1188 hci_dev_unlock(hdev);
1189 }
1190
1191 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1192 {
1193 struct hci_cp_create_conn *cp;
1194 struct hci_conn *conn;
1195
1196 BT_DBG("%s status 0x%x", hdev->name, status);
1197
1198 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1199 if (!cp)
1200 return;
1201
1202 hci_dev_lock(hdev);
1203
1204 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1205
1206 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1207
1208 if (status) {
1209 if (conn && conn->state == BT_CONNECT) {
1210 if (status != 0x0c || conn->attempt > 2) {
1211 conn->state = BT_CLOSED;
1212 hci_proto_connect_cfm(conn, status);
1213 hci_conn_del(conn);
1214 } else
1215 conn->state = BT_CONNECT2;
1216 }
1217 } else {
1218 if (!conn) {
1219 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1220 if (conn) {
1221 conn->out = true;
1222 conn->link_mode |= HCI_LM_MASTER;
1223 } else
1224 BT_ERR("No memory for new connection");
1225 }
1226 }
1227
1228 hci_dev_unlock(hdev);
1229 }
1230
1231 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1232 {
1233 struct hci_cp_add_sco *cp;
1234 struct hci_conn *acl, *sco;
1235 __u16 handle;
1236
1237 BT_DBG("%s status 0x%x", hdev->name, status);
1238
1239 if (!status)
1240 return;
1241
1242 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1243 if (!cp)
1244 return;
1245
1246 handle = __le16_to_cpu(cp->handle);
1247
1248 BT_DBG("%s handle %d", hdev->name, handle);
1249
1250 hci_dev_lock(hdev);
1251
1252 acl = hci_conn_hash_lookup_handle(hdev, handle);
1253 if (acl) {
1254 sco = acl->link;
1255 if (sco) {
1256 sco->state = BT_CLOSED;
1257
1258 hci_proto_connect_cfm(sco, status);
1259 hci_conn_del(sco);
1260 }
1261 }
1262
1263 hci_dev_unlock(hdev);
1264 }
1265
1266 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1267 {
1268 struct hci_cp_auth_requested *cp;
1269 struct hci_conn *conn;
1270
1271 BT_DBG("%s status 0x%x", hdev->name, status);
1272
1273 if (!status)
1274 return;
1275
1276 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1277 if (!cp)
1278 return;
1279
1280 hci_dev_lock(hdev);
1281
1282 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1283 if (conn) {
1284 if (conn->state == BT_CONFIG) {
1285 hci_proto_connect_cfm(conn, status);
1286 hci_conn_put(conn);
1287 }
1288 }
1289
1290 hci_dev_unlock(hdev);
1291 }
1292
1293 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1294 {
1295 struct hci_cp_set_conn_encrypt *cp;
1296 struct hci_conn *conn;
1297
1298 BT_DBG("%s status 0x%x", hdev->name, status);
1299
1300 if (!status)
1301 return;
1302
1303 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1304 if (!cp)
1305 return;
1306
1307 hci_dev_lock(hdev);
1308
1309 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1310 if (conn) {
1311 if (conn->state == BT_CONFIG) {
1312 hci_proto_connect_cfm(conn, status);
1313 hci_conn_put(conn);
1314 }
1315 }
1316
1317 hci_dev_unlock(hdev);
1318 }
1319
1320 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1321 struct hci_conn *conn)
1322 {
1323 if (conn->state != BT_CONFIG || !conn->out)
1324 return 0;
1325
1326 if (conn->pending_sec_level == BT_SECURITY_SDP)
1327 return 0;
1328
1329 /* Only request authentication for SSP connections or non-SSP
1330 * devices with sec_level HIGH or if MITM protection is requested */
1331 if (!hci_conn_ssp_enabled(conn) &&
1332 conn->pending_sec_level != BT_SECURITY_HIGH &&
1333 !(conn->auth_type & 0x01))
1334 return 0;
1335
1336 return 1;
1337 }
1338
1339 static inline int hci_resolve_name(struct hci_dev *hdev,
1340 struct inquiry_entry *e)
1341 {
1342 struct hci_cp_remote_name_req cp;
1343
1344 memset(&cp, 0, sizeof(cp));
1345
1346 bacpy(&cp.bdaddr, &e->data.bdaddr);
1347 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1348 cp.pscan_mode = e->data.pscan_mode;
1349 cp.clock_offset = e->data.clock_offset;
1350
1351 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1352 }
1353
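/*
 * Name resolution bookkeeping: entries on the discovery resolve list move
 * from NAME_NEEDED to NAME_PENDING when a Remote Name Request is sent, and
 * to NAME_KNOWN (and off the list) once the result is processed in
 * hci_check_pending_name().
 */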
1354 static bool hci_resolve_next_name(struct hci_dev *hdev)
1355 {
1356 struct discovery_state *discov = &hdev->discovery;
1357 struct inquiry_entry *e;
1358
1359 if (list_empty(&discov->resolve))
1360 return false;
1361
1362 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1363 if (hci_resolve_name(hdev, e) == 0) {
1364 e->name_state = NAME_PENDING;
1365 return true;
1366 }
1367
1368 return false;
1369 }
1370
1371 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1372 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1373 {
1374 struct discovery_state *discov = &hdev->discovery;
1375 struct inquiry_entry *e;
1376
1377 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1378 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0,
1379 name, name_len, conn->dev_class);
1380
1381 if (discov->state == DISCOVERY_STOPPED)
1382 return;
1383
1384 if (discov->state == DISCOVERY_STOPPING)
1385 goto discov_complete;
1386
1387 if (discov->state != DISCOVERY_RESOLVING)
1388 return;
1389
1390 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1391 if (e) {
1392 e->name_state = NAME_KNOWN;
1393 list_del(&e->list);
1394 if (name)
1395 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1396 e->data.rssi, name, name_len);
1397 }
1398
1399 if (hci_resolve_next_name(hdev))
1400 return;
1401
1402 discov_complete:
1403 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1404 }
1405
1406 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1407 {
1408 struct hci_cp_remote_name_req *cp;
1409 struct hci_conn *conn;
1410
1411 BT_DBG("%s status 0x%x", hdev->name, status);
1412
1413 	/* If successful, wait for the name req complete event before
1414 	 * checking whether authentication is needed */
1415 if (!status)
1416 return;
1417
1418 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1419 if (!cp)
1420 return;
1421
1422 hci_dev_lock(hdev);
1423
1424 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1425
1426 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1427 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1428
1429 if (!conn)
1430 goto unlock;
1431
1432 if (!hci_outgoing_auth_needed(hdev, conn))
1433 goto unlock;
1434
1435 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1436 struct hci_cp_auth_requested cp;
1437 cp.handle = __cpu_to_le16(conn->handle);
1438 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1439 }
1440
1441 unlock:
1442 hci_dev_unlock(hdev);
1443 }
1444
1445 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1446 {
1447 struct hci_cp_read_remote_features *cp;
1448 struct hci_conn *conn;
1449
1450 BT_DBG("%s status 0x%x", hdev->name, status);
1451
1452 if (!status)
1453 return;
1454
1455 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1456 if (!cp)
1457 return;
1458
1459 hci_dev_lock(hdev);
1460
1461 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1462 if (conn) {
1463 if (conn->state == BT_CONFIG) {
1464 hci_proto_connect_cfm(conn, status);
1465 hci_conn_put(conn);
1466 }
1467 }
1468
1469 hci_dev_unlock(hdev);
1470 }
1471
1472 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1473 {
1474 struct hci_cp_read_remote_ext_features *cp;
1475 struct hci_conn *conn;
1476
1477 BT_DBG("%s status 0x%x", hdev->name, status);
1478
1479 if (!status)
1480 return;
1481
1482 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1483 if (!cp)
1484 return;
1485
1486 hci_dev_lock(hdev);
1487
1488 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1489 if (conn) {
1490 if (conn->state == BT_CONFIG) {
1491 hci_proto_connect_cfm(conn, status);
1492 hci_conn_put(conn);
1493 }
1494 }
1495
1496 hci_dev_unlock(hdev);
1497 }
1498
1499 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1500 {
1501 struct hci_cp_setup_sync_conn *cp;
1502 struct hci_conn *acl, *sco;
1503 __u16 handle;
1504
1505 BT_DBG("%s status 0x%x", hdev->name, status);
1506
1507 if (!status)
1508 return;
1509
1510 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1511 if (!cp)
1512 return;
1513
1514 handle = __le16_to_cpu(cp->handle);
1515
1516 BT_DBG("%s handle %d", hdev->name, handle);
1517
1518 hci_dev_lock(hdev);
1519
1520 acl = hci_conn_hash_lookup_handle(hdev, handle);
1521 if (acl) {
1522 sco = acl->link;
1523 if (sco) {
1524 sco->state = BT_CLOSED;
1525
1526 hci_proto_connect_cfm(sco, status);
1527 hci_conn_del(sco);
1528 }
1529 }
1530
1531 hci_dev_unlock(hdev);
1532 }
1533
1534 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1535 {
1536 struct hci_cp_sniff_mode *cp;
1537 struct hci_conn *conn;
1538
1539 BT_DBG("%s status 0x%x", hdev->name, status);
1540
1541 if (!status)
1542 return;
1543
1544 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1545 if (!cp)
1546 return;
1547
1548 hci_dev_lock(hdev);
1549
1550 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1551 if (conn) {
1552 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1553
1554 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1555 hci_sco_setup(conn, status);
1556 }
1557
1558 hci_dev_unlock(hdev);
1559 }
1560
1561 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1562 {
1563 struct hci_cp_exit_sniff_mode *cp;
1564 struct hci_conn *conn;
1565
1566 BT_DBG("%s status 0x%x", hdev->name, status);
1567
1568 if (!status)
1569 return;
1570
1571 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1572 if (!cp)
1573 return;
1574
1575 hci_dev_lock(hdev);
1576
1577 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1578 if (conn) {
1579 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1580
1581 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1582 hci_sco_setup(conn, status);
1583 }
1584
1585 hci_dev_unlock(hdev);
1586 }
1587
1588 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1589 {
1590 struct hci_cp_disconnect *cp;
1591 struct hci_conn *conn;
1592
1593 if (!status)
1594 return;
1595
1596 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1597 if (!cp)
1598 return;
1599
1600 hci_dev_lock(hdev);
1601
1602 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1603 if (conn)
1604 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1605 conn->dst_type, status);
1606
1607 hci_dev_unlock(hdev);
1608 }
1609
1610 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1611 {
1612 struct hci_cp_le_create_conn *cp;
1613 struct hci_conn *conn;
1614
1615 BT_DBG("%s status 0x%x", hdev->name, status);
1616
1617 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1618 if (!cp)
1619 return;
1620
1621 hci_dev_lock(hdev);
1622
1623 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1624
1625 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1626 conn);
1627
1628 if (status) {
1629 if (conn && conn->state == BT_CONNECT) {
1630 conn->state = BT_CLOSED;
1631 hci_proto_connect_cfm(conn, status);
1632 hci_conn_del(conn);
1633 }
1634 } else {
1635 if (!conn) {
1636 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1637 if (conn) {
1638 conn->dst_type = cp->peer_addr_type;
1639 conn->out = true;
1640 } else {
1641 BT_ERR("No memory for new connection");
1642 }
1643 }
1644 }
1645
1646 hci_dev_unlock(hdev);
1647 }
1648
1649 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1650 {
1651 BT_DBG("%s status 0x%x", hdev->name, status);
1652 }
1653
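/*
 * Inquiry Complete: if the management interface is active and the
 * discovery cache still holds entries with unresolved names, continue
 * with name resolution instead of stopping discovery right away.
 */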
1654 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1655 {
1656 __u8 status = *((__u8 *) skb->data);
1657 struct discovery_state *discov = &hdev->discovery;
1658 struct inquiry_entry *e;
1659
1660 BT_DBG("%s status %d", hdev->name, status);
1661
1662 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1663
1664 hci_conn_check_pending(hdev);
1665
1666 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1667 return;
1668
1669 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1670 return;
1671
1672 hci_dev_lock(hdev);
1673
1674 if (discov->state != DISCOVERY_FINDING)
1675 goto unlock;
1676
1677 if (list_empty(&discov->resolve)) {
1678 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1679 goto unlock;
1680 }
1681
1682 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1683 if (e && hci_resolve_name(hdev, e) == 0) {
1684 e->name_state = NAME_PENDING;
1685 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1686 } else {
1687 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1688 }
1689
1690 unlock:
1691 hci_dev_unlock(hdev);
1692 }
1693
1694 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1695 {
1696 struct inquiry_data data;
1697 struct inquiry_info *info = (void *) (skb->data + 1);
1698 int num_rsp = *((__u8 *) skb->data);
1699
1700 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1701
1702 if (!num_rsp)
1703 return;
1704
1705 hci_dev_lock(hdev);
1706
1707 for (; num_rsp; num_rsp--, info++) {
1708 bool name_known, ssp;
1709
1710 bacpy(&data.bdaddr, &info->bdaddr);
1711 data.pscan_rep_mode = info->pscan_rep_mode;
1712 data.pscan_period_mode = info->pscan_period_mode;
1713 data.pscan_mode = info->pscan_mode;
1714 memcpy(data.dev_class, info->dev_class, 3);
1715 data.clock_offset = info->clock_offset;
1716 data.rssi = 0x00;
1717 data.ssp_mode = 0x00;
1718
1719 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1720 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1721 info->dev_class, 0, !name_known, ssp,
1722 NULL, 0);
1723 }
1724
1725 hci_dev_unlock(hdev);
1726 }
1727
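/*
 * Connection Complete: a SCO completion may arrive for a connection that
 * was set up as eSCO, so fall back to an ESCO_LINK lookup before giving
 * up.  A successful ACL link stays in BT_CONFIG until remote features
 * and, if required, authentication have been handled.
 */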
1728 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1729 {
1730 struct hci_ev_conn_complete *ev = (void *) skb->data;
1731 struct hci_conn *conn;
1732
1733 BT_DBG("%s", hdev->name);
1734
1735 hci_dev_lock(hdev);
1736
1737 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1738 if (!conn) {
1739 if (ev->link_type != SCO_LINK)
1740 goto unlock;
1741
1742 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1743 if (!conn)
1744 goto unlock;
1745
1746 conn->type = SCO_LINK;
1747 }
1748
1749 if (!ev->status) {
1750 conn->handle = __le16_to_cpu(ev->handle);
1751
1752 if (conn->type == ACL_LINK) {
1753 conn->state = BT_CONFIG;
1754 hci_conn_hold(conn);
1755 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1756 } else
1757 conn->state = BT_CONNECTED;
1758
1759 hci_conn_hold_device(conn);
1760 hci_conn_add_sysfs(conn);
1761
1762 if (test_bit(HCI_AUTH, &hdev->flags))
1763 conn->link_mode |= HCI_LM_AUTH;
1764
1765 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1766 conn->link_mode |= HCI_LM_ENCRYPT;
1767
1768 /* Get remote features */
1769 if (conn->type == ACL_LINK) {
1770 struct hci_cp_read_remote_features cp;
1771 cp.handle = ev->handle;
1772 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1773 sizeof(cp), &cp);
1774 }
1775
1776 /* Set packet type for incoming connection */
1777 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1778 struct hci_cp_change_conn_ptype cp;
1779 cp.handle = ev->handle;
1780 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1781 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1782 sizeof(cp), &cp);
1783 }
1784 } else {
1785 conn->state = BT_CLOSED;
1786 if (conn->type == ACL_LINK)
1787 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1788 conn->dst_type, ev->status);
1789 }
1790
1791 if (conn->type == ACL_LINK)
1792 hci_sco_setup(conn, ev->status);
1793
1794 if (ev->status) {
1795 hci_proto_connect_cfm(conn, ev->status);
1796 hci_conn_del(conn);
1797 } else if (ev->link_type != ACL_LINK)
1798 hci_proto_connect_cfm(conn, ev->status);
1799
1800 unlock:
1801 hci_dev_unlock(hdev);
1802
1803 hci_conn_check_pending(hdev);
1804 }
1805
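/*
 * Connection Request: accept when the link mode (or a protocol handler via
 * hci_proto_connect_ind) allows it and the peer is not blacklisted.  ACL
 * requests become master if role switch is possible and requested; SCO/eSCO
 * requests get synchronous connection parameters.  Everything else is
 * rejected.
 */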
1806 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1807 {
1808 struct hci_ev_conn_request *ev = (void *) skb->data;
1809 int mask = hdev->link_mode;
1810
1811 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1812 batostr(&ev->bdaddr), ev->link_type);
1813
1814 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1815
1816 if ((mask & HCI_LM_ACCEPT) &&
1817 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1818 /* Connection accepted */
1819 struct inquiry_entry *ie;
1820 struct hci_conn *conn;
1821
1822 hci_dev_lock(hdev);
1823
1824 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1825 if (ie)
1826 memcpy(ie->data.dev_class, ev->dev_class, 3);
1827
1828 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1829 if (!conn) {
1830 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1831 if (!conn) {
1832 BT_ERR("No memory for new connection");
1833 hci_dev_unlock(hdev);
1834 return;
1835 }
1836 }
1837
1838 memcpy(conn->dev_class, ev->dev_class, 3);
1839 conn->state = BT_CONNECT;
1840
1841 hci_dev_unlock(hdev);
1842
1843 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1844 struct hci_cp_accept_conn_req cp;
1845
1846 bacpy(&cp.bdaddr, &ev->bdaddr);
1847
1848 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1849 cp.role = 0x00; /* Become master */
1850 else
1851 cp.role = 0x01; /* Remain slave */
1852
1853 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1854 sizeof(cp), &cp);
1855 } else {
1856 struct hci_cp_accept_sync_conn_req cp;
1857
1858 bacpy(&cp.bdaddr, &ev->bdaddr);
1859 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1860
1861 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1862 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1863 cp.max_latency = cpu_to_le16(0xffff);
1864 cp.content_format = cpu_to_le16(hdev->voice_setting);
1865 cp.retrans_effort = 0xff;
1866
1867 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1868 sizeof(cp), &cp);
1869 }
1870 } else {
1871 /* Connection rejected */
1872 struct hci_cp_reject_conn_req cp;
1873
1874 bacpy(&cp.bdaddr, &ev->bdaddr);
1875 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1876 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1877 }
1878 }
1879
1880 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1881 {
1882 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1883 struct hci_conn *conn;
1884
1885 BT_DBG("%s status %d", hdev->name, ev->status);
1886
1887 hci_dev_lock(hdev);
1888
1889 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1890 if (!conn)
1891 goto unlock;
1892
1893 if (ev->status == 0)
1894 conn->state = BT_CLOSED;
1895
1896 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1897 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1898 if (ev->status != 0)
1899 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1900 conn->dst_type, ev->status);
1901 else
1902 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1903 conn->dst_type);
1904 }
1905
1906 if (ev->status == 0) {
1907 hci_proto_disconn_cfm(conn, ev->reason);
1908 hci_conn_del(conn);
1909 }
1910
1911 unlock:
1912 hci_dev_unlock(hdev);
1913 }
1914
1915 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1916 {
1917 struct hci_ev_auth_complete *ev = (void *) skb->data;
1918 struct hci_conn *conn;
1919
1920 BT_DBG("%s status %d", hdev->name, ev->status);
1921
1922 hci_dev_lock(hdev);
1923
1924 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1925 if (!conn)
1926 goto unlock;
1927
1928 if (!ev->status) {
1929 if (!hci_conn_ssp_enabled(conn) &&
1930 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1931 BT_INFO("re-auth of legacy device is not possible.");
1932 } else {
1933 conn->link_mode |= HCI_LM_AUTH;
1934 conn->sec_level = conn->pending_sec_level;
1935 }
1936 } else {
1937 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1938 ev->status);
1939 }
1940
1941 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1942 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1943
1944 if (conn->state == BT_CONFIG) {
1945 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1946 struct hci_cp_set_conn_encrypt cp;
1947 cp.handle = ev->handle;
1948 cp.encrypt = 0x01;
1949 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1950 &cp);
1951 } else {
1952 conn->state = BT_CONNECTED;
1953 hci_proto_connect_cfm(conn, ev->status);
1954 hci_conn_put(conn);
1955 }
1956 } else {
1957 hci_auth_cfm(conn, ev->status);
1958
1959 hci_conn_hold(conn);
1960 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1961 hci_conn_put(conn);
1962 }
1963
1964 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1965 if (!ev->status) {
1966 struct hci_cp_set_conn_encrypt cp;
1967 cp.handle = ev->handle;
1968 cp.encrypt = 0x01;
1969 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1970 &cp);
1971 } else {
1972 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1973 hci_encrypt_cfm(conn, ev->status, 0x00);
1974 }
1975 }
1976
1977 unlock:
1978 hci_dev_unlock(hdev);
1979 }
1980
1981 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1982 {
1983 struct hci_ev_remote_name *ev = (void *) skb->data;
1984 struct hci_conn *conn;
1985
1986 BT_DBG("%s", hdev->name);
1987
1988 hci_conn_check_pending(hdev);
1989
1990 hci_dev_lock(hdev);
1991
1992 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1993
1994 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1995 goto check_auth;
1996
1997 if (ev->status == 0)
1998 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1999 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2000 else
2001 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2002
2003 check_auth:
2004 if (!conn)
2005 goto unlock;
2006
2007 if (!hci_outgoing_auth_needed(hdev, conn))
2008 goto unlock;
2009
2010 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2011 struct hci_cp_auth_requested cp;
2012 cp.handle = __cpu_to_le16(conn->handle);
2013 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2014 }
2015
2016 unlock:
2017 hci_dev_unlock(hdev);
2018 }
2019
2020 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2021 {
2022 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2023 struct hci_conn *conn;
2024
2025 BT_DBG("%s status %d", hdev->name, ev->status);
2026
2027 hci_dev_lock(hdev);
2028
2029 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2030 if (conn) {
2031 if (!ev->status) {
2032 if (ev->encrypt) {
2033 /* Encryption implies authentication */
2034 conn->link_mode |= HCI_LM_AUTH;
2035 conn->link_mode |= HCI_LM_ENCRYPT;
2036 conn->sec_level = conn->pending_sec_level;
2037 } else
2038 conn->link_mode &= ~HCI_LM_ENCRYPT;
2039 }
2040
2041 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2042
2043 if (conn->state == BT_CONFIG) {
2044 if (!ev->status)
2045 conn->state = BT_CONNECTED;
2046
2047 hci_proto_connect_cfm(conn, ev->status);
2048 hci_conn_put(conn);
2049 } else
2050 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2051 }
2052
2053 hci_dev_unlock(hdev);
2054 }
2055
2056 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2057 {
2058 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2059 struct hci_conn *conn;
2060
2061 BT_DBG("%s status %d", hdev->name, ev->status);
2062
2063 hci_dev_lock(hdev);
2064
2065 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2066 if (conn) {
2067 if (!ev->status)
2068 conn->link_mode |= HCI_LM_SECURE;
2069
2070 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2071
2072 hci_key_change_cfm(conn, ev->status);
2073 }
2074
2075 hci_dev_unlock(hdev);
2076 }
2077
2078 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2079 {
2080 struct hci_ev_remote_features *ev = (void *) skb->data;
2081 struct hci_conn *conn;
2082
2083 BT_DBG("%s status %d", hdev->name, ev->status);
2084
2085 hci_dev_lock(hdev);
2086
2087 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2088 if (!conn)
2089 goto unlock;
2090
2091 if (!ev->status)
2092 memcpy(conn->features, ev->features, 8);
2093
2094 if (conn->state != BT_CONFIG)
2095 goto unlock;
2096
2097 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2098 struct hci_cp_read_remote_ext_features cp;
2099 cp.handle = ev->handle;
2100 cp.page = 0x01;
2101 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2102 sizeof(cp), &cp);
2103 goto unlock;
2104 }
2105
2106 if (!ev->status) {
2107 struct hci_cp_remote_name_req cp;
2108 memset(&cp, 0, sizeof(cp));
2109 bacpy(&cp.bdaddr, &conn->dst);
2110 cp.pscan_rep_mode = 0x02;
2111 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2112 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2113 mgmt_device_connected(hdev, &conn->dst, conn->type,
2114 conn->dst_type, 0, NULL, 0,
2115 conn->dev_class);
2116
2117 if (!hci_outgoing_auth_needed(hdev, conn)) {
2118 conn->state = BT_CONNECTED;
2119 hci_proto_connect_cfm(conn, ev->status);
2120 hci_conn_put(conn);
2121 }
2122
2123 unlock:
2124 hci_dev_unlock(hdev);
2125 }
2126
2127 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2128 {
2129 BT_DBG("%s", hdev->name);
2130 }
2131
2132 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2133 {
2134 BT_DBG("%s", hdev->name);
2135 }
2136
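/* Command Complete event. Dispatches to the per-opcode hci_cc_*
 * handlers above, stops the command timer for anything other than
 * HCI_OP_NOP and, when the controller reports free command slots
 * (ev->ncmd), kicks the command work queue again.
 */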
2137 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2138 {
2139 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2140 __u16 opcode;
2141
2142 skb_pull(skb, sizeof(*ev));
2143
2144 opcode = __le16_to_cpu(ev->opcode);
2145
2146 switch (opcode) {
2147 case HCI_OP_INQUIRY_CANCEL:
2148 hci_cc_inquiry_cancel(hdev, skb);
2149 break;
2150
2151 case HCI_OP_EXIT_PERIODIC_INQ:
2152 hci_cc_exit_periodic_inq(hdev, skb);
2153 break;
2154
2155 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2156 hci_cc_remote_name_req_cancel(hdev, skb);
2157 break;
2158
2159 case HCI_OP_ROLE_DISCOVERY:
2160 hci_cc_role_discovery(hdev, skb);
2161 break;
2162
2163 case HCI_OP_READ_LINK_POLICY:
2164 hci_cc_read_link_policy(hdev, skb);
2165 break;
2166
2167 case HCI_OP_WRITE_LINK_POLICY:
2168 hci_cc_write_link_policy(hdev, skb);
2169 break;
2170
2171 case HCI_OP_READ_DEF_LINK_POLICY:
2172 hci_cc_read_def_link_policy(hdev, skb);
2173 break;
2174
2175 case HCI_OP_WRITE_DEF_LINK_POLICY:
2176 hci_cc_write_def_link_policy(hdev, skb);
2177 break;
2178
2179 case HCI_OP_RESET:
2180 hci_cc_reset(hdev, skb);
2181 break;
2182
2183 case HCI_OP_WRITE_LOCAL_NAME:
2184 hci_cc_write_local_name(hdev, skb);
2185 break;
2186
2187 case HCI_OP_READ_LOCAL_NAME:
2188 hci_cc_read_local_name(hdev, skb);
2189 break;
2190
2191 case HCI_OP_WRITE_AUTH_ENABLE:
2192 hci_cc_write_auth_enable(hdev, skb);
2193 break;
2194
2195 case HCI_OP_WRITE_ENCRYPT_MODE:
2196 hci_cc_write_encrypt_mode(hdev, skb);
2197 break;
2198
2199 case HCI_OP_WRITE_SCAN_ENABLE:
2200 hci_cc_write_scan_enable(hdev, skb);
2201 break;
2202
2203 case HCI_OP_READ_CLASS_OF_DEV:
2204 hci_cc_read_class_of_dev(hdev, skb);
2205 break;
2206
2207 case HCI_OP_WRITE_CLASS_OF_DEV:
2208 hci_cc_write_class_of_dev(hdev, skb);
2209 break;
2210
2211 case HCI_OP_READ_VOICE_SETTING:
2212 hci_cc_read_voice_setting(hdev, skb);
2213 break;
2214
2215 case HCI_OP_WRITE_VOICE_SETTING:
2216 hci_cc_write_voice_setting(hdev, skb);
2217 break;
2218
2219 case HCI_OP_HOST_BUFFER_SIZE:
2220 hci_cc_host_buffer_size(hdev, skb);
2221 break;
2222
2223 case HCI_OP_WRITE_SSP_MODE:
2224 hci_cc_write_ssp_mode(hdev, skb);
2225 break;
2226
2227 case HCI_OP_READ_LOCAL_VERSION:
2228 hci_cc_read_local_version(hdev, skb);
2229 break;
2230
2231 case HCI_OP_READ_LOCAL_COMMANDS:
2232 hci_cc_read_local_commands(hdev, skb);
2233 break;
2234
2235 case HCI_OP_READ_LOCAL_FEATURES:
2236 hci_cc_read_local_features(hdev, skb);
2237 break;
2238
2239 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2240 hci_cc_read_local_ext_features(hdev, skb);
2241 break;
2242
2243 case HCI_OP_READ_BUFFER_SIZE:
2244 hci_cc_read_buffer_size(hdev, skb);
2245 break;
2246
2247 case HCI_OP_READ_BD_ADDR:
2248 hci_cc_read_bd_addr(hdev, skb);
2249 break;
2250
2251 case HCI_OP_READ_DATA_BLOCK_SIZE:
2252 hci_cc_read_data_block_size(hdev, skb);
2253 break;
2254
2255 case HCI_OP_WRITE_CA_TIMEOUT:
2256 hci_cc_write_ca_timeout(hdev, skb);
2257 break;
2258
2259 case HCI_OP_READ_FLOW_CONTROL_MODE:
2260 hci_cc_read_flow_control_mode(hdev, skb);
2261 break;
2262
2263 case HCI_OP_READ_LOCAL_AMP_INFO:
2264 hci_cc_read_local_amp_info(hdev, skb);
2265 break;
2266
2267 case HCI_OP_DELETE_STORED_LINK_KEY:
2268 hci_cc_delete_stored_link_key(hdev, skb);
2269 break;
2270
2271 case HCI_OP_SET_EVENT_MASK:
2272 hci_cc_set_event_mask(hdev, skb);
2273 break;
2274
2275 case HCI_OP_WRITE_INQUIRY_MODE:
2276 hci_cc_write_inquiry_mode(hdev, skb);
2277 break;
2278
2279 case HCI_OP_READ_INQ_RSP_TX_POWER:
2280 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2281 break;
2282
2283 case HCI_OP_SET_EVENT_FLT:
2284 hci_cc_set_event_flt(hdev, skb);
2285 break;
2286
2287 case HCI_OP_PIN_CODE_REPLY:
2288 hci_cc_pin_code_reply(hdev, skb);
2289 break;
2290
2291 case HCI_OP_PIN_CODE_NEG_REPLY:
2292 hci_cc_pin_code_neg_reply(hdev, skb);
2293 break;
2294
2295 case HCI_OP_READ_LOCAL_OOB_DATA:
2296 hci_cc_read_local_oob_data_reply(hdev, skb);
2297 break;
2298
2299 case HCI_OP_LE_READ_BUFFER_SIZE:
2300 hci_cc_le_read_buffer_size(hdev, skb);
2301 break;
2302
2303 case HCI_OP_USER_CONFIRM_REPLY:
2304 hci_cc_user_confirm_reply(hdev, skb);
2305 break;
2306
2307 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2308 hci_cc_user_confirm_neg_reply(hdev, skb);
2309 break;
2310
2311 case HCI_OP_USER_PASSKEY_REPLY:
2312 hci_cc_user_passkey_reply(hdev, skb);
2313 break;
2314
2315 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2316 hci_cc_user_passkey_neg_reply(hdev, skb);
break;
2317 
2318 case HCI_OP_LE_SET_SCAN_PARAM:
2319 hci_cc_le_set_scan_param(hdev, skb);
2320 break;
2321
2322 case HCI_OP_LE_SET_SCAN_ENABLE:
2323 hci_cc_le_set_scan_enable(hdev, skb);
2324 break;
2325
2326 case HCI_OP_LE_LTK_REPLY:
2327 hci_cc_le_ltk_reply(hdev, skb);
2328 break;
2329
2330 case HCI_OP_LE_LTK_NEG_REPLY:
2331 hci_cc_le_ltk_neg_reply(hdev, skb);
2332 break;
2333
2334 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2335 hci_cc_write_le_host_supported(hdev, skb);
2336 break;
2337
2338 default:
2339 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2340 break;
2341 }
2342
2343 if (opcode != HCI_OP_NOP)
2344 del_timer(&hdev->cmd_timer);
2345
2346 if (ev->ncmd) {
2347 atomic_set(&hdev->cmd_cnt, 1);
2348 if (!skb_queue_empty(&hdev->cmd_q))
2349 queue_work(hdev->workqueue, &hdev->cmd_work);
2350 }
2351 }
2352
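/* Command Status event. Like Command Complete, but the controller only
 * returns a status; dispatches to the hci_cs_* handlers and restarts
 * command processing unless an HCI reset is in progress.
 */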
2353 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2354 {
2355 struct hci_ev_cmd_status *ev = (void *) skb->data;
2356 __u16 opcode;
2357
2358 skb_pull(skb, sizeof(*ev));
2359
2360 opcode = __le16_to_cpu(ev->opcode);
2361
2362 switch (opcode) {
2363 case HCI_OP_INQUIRY:
2364 hci_cs_inquiry(hdev, ev->status);
2365 break;
2366
2367 case HCI_OP_CREATE_CONN:
2368 hci_cs_create_conn(hdev, ev->status);
2369 break;
2370
2371 case HCI_OP_ADD_SCO:
2372 hci_cs_add_sco(hdev, ev->status);
2373 break;
2374
2375 case HCI_OP_AUTH_REQUESTED:
2376 hci_cs_auth_requested(hdev, ev->status);
2377 break;
2378
2379 case HCI_OP_SET_CONN_ENCRYPT:
2380 hci_cs_set_conn_encrypt(hdev, ev->status);
2381 break;
2382
2383 case HCI_OP_REMOTE_NAME_REQ:
2384 hci_cs_remote_name_req(hdev, ev->status);
2385 break;
2386
2387 case HCI_OP_READ_REMOTE_FEATURES:
2388 hci_cs_read_remote_features(hdev, ev->status);
2389 break;
2390
2391 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2392 hci_cs_read_remote_ext_features(hdev, ev->status);
2393 break;
2394
2395 case HCI_OP_SETUP_SYNC_CONN:
2396 hci_cs_setup_sync_conn(hdev, ev->status);
2397 break;
2398
2399 case HCI_OP_SNIFF_MODE:
2400 hci_cs_sniff_mode(hdev, ev->status);
2401 break;
2402
2403 case HCI_OP_EXIT_SNIFF_MODE:
2404 hci_cs_exit_sniff_mode(hdev, ev->status);
2405 break;
2406
2407 case HCI_OP_DISCONNECT:
2408 hci_cs_disconnect(hdev, ev->status);
2409 break;
2410
2411 case HCI_OP_LE_CREATE_CONN:
2412 hci_cs_le_create_conn(hdev, ev->status);
2413 break;
2414
2415 case HCI_OP_LE_START_ENC:
2416 hci_cs_le_start_enc(hdev, ev->status);
2417 break;
2418
2419 default:
2420 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2421 break;
2422 }
2423
2424 if (opcode != HCI_OP_NOP)
2425 del_timer(&hdev->cmd_timer);
2426
2427 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2428 atomic_set(&hdev->cmd_cnt, 1);
2429 if (!skb_queue_empty(&hdev->cmd_q))
2430 queue_work(hdev->workqueue, &hdev->cmd_work);
2431 }
2432 }
2433
2434 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2435 {
2436 struct hci_ev_role_change *ev = (void *) skb->data;
2437 struct hci_conn *conn;
2438
2439 BT_DBG("%s status %d", hdev->name, ev->status);
2440
2441 hci_dev_lock(hdev);
2442
2443 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2444 if (conn) {
2445 if (!ev->status) {
2446 if (ev->role)
2447 conn->link_mode &= ~HCI_LM_MASTER;
2448 else
2449 conn->link_mode |= HCI_LM_MASTER;
2450 }
2451
2452 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2453
2454 hci_role_switch_cfm(conn, ev->status, ev->role);
2455 }
2456
2457 hci_dev_unlock(hdev);
2458 }
2459
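/* Number of Completed Packets event (packet-based flow control).
 * Credits the per-link-type transmit counters (ACL, SCO, LE) for each
 * reported handle, clamped to the controller's advertised limits, and
 * reschedules the TX work.
 */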
2460 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2461 {
2462 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2463 int i;
2464
2465 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2466 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2467 return;
2468 }
2469
2470 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2471 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2472 BT_DBG("%s bad parameters", hdev->name);
2473 return;
2474 }
2475
2476 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2477
2478 for (i = 0; i < ev->num_hndl; i++) {
2479 struct hci_comp_pkts_info *info = &ev->handles[i];
2480 struct hci_conn *conn;
2481 __u16 handle, count;
2482
2483 handle = __le16_to_cpu(info->handle);
2484 count = __le16_to_cpu(info->count);
2485
2486 conn = hci_conn_hash_lookup_handle(hdev, handle);
2487 if (!conn)
2488 continue;
2489
2490 conn->sent -= count;
2491
2492 switch (conn->type) {
2493 case ACL_LINK:
2494 hdev->acl_cnt += count;
2495 if (hdev->acl_cnt > hdev->acl_pkts)
2496 hdev->acl_cnt = hdev->acl_pkts;
2497 break;
2498
2499 case LE_LINK:
2500 if (hdev->le_pkts) {
2501 hdev->le_cnt += count;
2502 if (hdev->le_cnt > hdev->le_pkts)
2503 hdev->le_cnt = hdev->le_pkts;
2504 } else {
2505 hdev->acl_cnt += count;
2506 if (hdev->acl_cnt > hdev->acl_pkts)
2507 hdev->acl_cnt = hdev->acl_pkts;
2508 }
2509 break;
2510
2511 case SCO_LINK:
2512 hdev->sco_cnt += count;
2513 if (hdev->sco_cnt > hdev->sco_pkts)
2514 hdev->sco_cnt = hdev->sco_pkts;
2515 break;
2516
2517 default:
2518 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2519 break;
2520 }
2521 }
2522
2523 queue_work(hdev->workqueue, &hdev->tx_work);
2524 }
2525
2526 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2527 struct sk_buff *skb)
2528 {
2529 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2530 int i;
2531
2532 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2533 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2534 return;
2535 }
2536
2537 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2538 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2539 BT_DBG("%s bad parameters", hdev->name);
2540 return;
2541 }
2542
2543 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2544 ev->num_hndl);
2545
2546 for (i = 0; i < ev->num_hndl; i++) {
2547 struct hci_comp_blocks_info *info = &ev->handles[i];
2548 struct hci_conn *conn;
2549 __u16 handle, block_count;
2550
2551 handle = __le16_to_cpu(info->handle);
2552 block_count = __le16_to_cpu(info->blocks);
2553
2554 conn = hci_conn_hash_lookup_handle(hdev, handle);
2555 if (!conn)
2556 continue;
2557
2558 conn->sent -= block_count;
2559
2560 switch (conn->type) {
2561 case ACL_LINK:
2562 hdev->block_cnt += block_count;
2563 if (hdev->block_cnt > hdev->num_blocks)
2564 hdev->block_cnt = hdev->num_blocks;
2565 break;
2566
2567 default:
2568 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2569 break;
2570 }
2571 }
2572
2573 queue_work(hdev->workqueue, &hdev->tx_work);
2574 }
2575
2576 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2577 {
2578 struct hci_ev_mode_change *ev = (void *) skb->data;
2579 struct hci_conn *conn;
2580
2581 BT_DBG("%s status %d", hdev->name, ev->status);
2582
2583 hci_dev_lock(hdev);
2584
2585 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2586 if (conn) {
2587 conn->mode = ev->mode;
2588 conn->interval = __le16_to_cpu(ev->interval);
2589
2590 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2591 if (conn->mode == HCI_CM_ACTIVE)
2592 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2593 else
2594 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2595 }
2596
2597 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2598 hci_sco_setup(conn, ev->status);
2599 }
2600
2601 hci_dev_unlock(hdev);
2602 }
2603
2604 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2605 {
2606 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2607 struct hci_conn *conn;
2608
2609 BT_DBG("%s", hdev->name);
2610
2611 hci_dev_lock(hdev);
2612
2613 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2614 if (!conn)
2615 goto unlock;
2616
2617 if (conn->state == BT_CONNECTED) {
2618 hci_conn_hold(conn);
2619 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2620 hci_conn_put(conn);
2621 }
2622
2623 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2624 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2625 sizeof(ev->bdaddr), &ev->bdaddr);
2626 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2627 u8 secure;
2628
2629 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2630 secure = 1;
2631 else
2632 secure = 0;
2633
2634 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2635 }
2636
2637 unlock:
2638 hci_dev_unlock(hdev);
2639 }
2640
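/* Link Key Request event. Looks up a stored key for the peer and
 * replies with it, unless the key is a debug key or is not strong
 * enough for the pending security level, in which case a negative
 * reply is sent instead.
 */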
2641 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2642 {
2643 struct hci_ev_link_key_req *ev = (void *) skb->data;
2644 struct hci_cp_link_key_reply cp;
2645 struct hci_conn *conn;
2646 struct link_key *key;
2647
2648 BT_DBG("%s", hdev->name);
2649
2650 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2651 return;
2652
2653 hci_dev_lock(hdev);
2654
2655 key = hci_find_link_key(hdev, &ev->bdaddr);
2656 if (!key) {
2657 BT_DBG("%s link key not found for %s", hdev->name,
2658 batostr(&ev->bdaddr));
2659 goto not_found;
2660 }
2661
2662 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2663 batostr(&ev->bdaddr));
2664
2665 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2666 key->type == HCI_LK_DEBUG_COMBINATION) {
2667 BT_DBG("%s ignoring debug key", hdev->name);
2668 goto not_found;
2669 }
2670
2671 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2672 if (conn) {
2673 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2674 conn->auth_type != 0xff &&
2675 (conn->auth_type & 0x01)) {
2676 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2677 goto not_found;
2678 }
2679
2680 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2681 conn->pending_sec_level == BT_SECURITY_HIGH) {
2682 BT_DBG("%s ignoring key unauthenticated for high \
2683 security", hdev->name);
2684 goto not_found;
2685 }
2686
2687 conn->key_type = key->type;
2688 conn->pin_length = key->pin_len;
2689 }
2690
2691 bacpy(&cp.bdaddr, &ev->bdaddr);
2692 memcpy(cp.link_key, key->val, 16);
2693
2694 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2695
2696 hci_dev_unlock(hdev);
2697
2698 return;
2699
2700 not_found:
2701 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2702 hci_dev_unlock(hdev);
2703 }
2704
2705 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2706 {
2707 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2708 struct hci_conn *conn;
2709 u8 pin_len = 0;
2710
2711 BT_DBG("%s", hdev->name);
2712
2713 hci_dev_lock(hdev);
2714
2715 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2716 if (conn) {
2717 hci_conn_hold(conn);
2718 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2719 pin_len = conn->pin_length;
2720
2721 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2722 conn->key_type = ev->key_type;
2723
2724 hci_conn_put(conn);
2725 }
2726
2727 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2728 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2729 ev->key_type, pin_len);
2730
2731 hci_dev_unlock(hdev);
2732 }
2733
2734 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2735 {
2736 struct hci_ev_clock_offset *ev = (void *) skb->data;
2737 struct hci_conn *conn;
2738
2739 BT_DBG("%s status %d", hdev->name, ev->status);
2740
2741 hci_dev_lock(hdev);
2742
2743 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2744 if (conn && !ev->status) {
2745 struct inquiry_entry *ie;
2746
2747 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2748 if (ie) {
2749 ie->data.clock_offset = ev->clock_offset;
2750 ie->timestamp = jiffies;
2751 }
2752 }
2753
2754 hci_dev_unlock(hdev);
2755 }
2756
2757 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2758 {
2759 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2760 struct hci_conn *conn;
2761
2762 BT_DBG("%s status %d", hdev->name, ev->status);
2763
2764 hci_dev_lock(hdev);
2765
2766 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2767 if (conn && !ev->status)
2768 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2769
2770 hci_dev_unlock(hdev);
2771 }
2772
2773 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2774 {
2775 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2776 struct inquiry_entry *ie;
2777
2778 BT_DBG("%s", hdev->name);
2779
2780 hci_dev_lock(hdev);
2781
2782 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2783 if (ie) {
2784 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2785 ie->timestamp = jiffies;
2786 }
2787
2788 hci_dev_unlock(hdev);
2789 }
2790
2791 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2792 {
2793 struct inquiry_data data;
2794 int num_rsp = *((__u8 *) skb->data);
2795 bool name_known, ssp;
2796
2797 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2798
2799 if (!num_rsp)
2800 return;
2801
2802 hci_dev_lock(hdev);
2803
2804 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2805 struct inquiry_info_with_rssi_and_pscan_mode *info;
2806 info = (void *) (skb->data + 1);
2807
2808 for (; num_rsp; num_rsp--, info++) {
2809 bacpy(&data.bdaddr, &info->bdaddr);
2810 data.pscan_rep_mode = info->pscan_rep_mode;
2811 data.pscan_period_mode = info->pscan_period_mode;
2812 data.pscan_mode = info->pscan_mode;
2813 memcpy(data.dev_class, info->dev_class, 3);
2814 data.clock_offset = info->clock_offset;
2815 data.rssi = info->rssi;
2816 data.ssp_mode = 0x00;
2817
2818 name_known = hci_inquiry_cache_update(hdev, &data,
2819 false, &ssp);
2820 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2821 info->dev_class, info->rssi,
2822 !name_known, ssp, NULL, 0);
2823 }
2824 } else {
2825 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2826
2827 for (; num_rsp; num_rsp--, info++) {
2828 bacpy(&data.bdaddr, &info->bdaddr);
2829 data.pscan_rep_mode = info->pscan_rep_mode;
2830 data.pscan_period_mode = info->pscan_period_mode;
2831 data.pscan_mode = 0x00;
2832 memcpy(data.dev_class, info->dev_class, 3);
2833 data.clock_offset = info->clock_offset;
2834 data.rssi = info->rssi;
2835 data.ssp_mode = 0x00;
2836 name_known = hci_inquiry_cache_update(hdev, &data,
2837 false, &ssp);
2838 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2839 info->dev_class, info->rssi,
2840 !name_known, ssp, NULL, 0);
2841 }
2842 }
2843
2844 hci_dev_unlock(hdev);
2845 }
2846
2847 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2848 {
2849 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2850 struct hci_conn *conn;
2851
2852 BT_DBG("%s", hdev->name);
2853
2854 hci_dev_lock(hdev);
2855
2856 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2857 if (!conn)
2858 goto unlock;
2859
2860 if (!ev->status && ev->page == 0x01) {
2861 struct inquiry_entry *ie;
2862
2863 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2864 if (ie)
2865 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2866
2867 if (ev->features[0] & LMP_HOST_SSP)
2868 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2869 }
2870
2871 if (conn->state != BT_CONFIG)
2872 goto unlock;
2873
2874 if (!ev->status) {
2875 struct hci_cp_remote_name_req cp;
2876 memset(&cp, 0, sizeof(cp));
2877 bacpy(&cp.bdaddr, &conn->dst);
2878 cp.pscan_rep_mode = 0x02;
2879 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2880 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2881 mgmt_device_connected(hdev, &conn->dst, conn->type,
2882 conn->dst_type, 0, NULL, 0,
2883 conn->dev_class);
2884
2885 if (!hci_outgoing_auth_needed(hdev, conn)) {
2886 conn->state = BT_CONNECTED;
2887 hci_proto_connect_cfm(conn, ev->status);
2888 hci_conn_put(conn);
2889 }
2890
2891 unlock:
2892 hci_dev_unlock(hdev);
2893 }
2894
2895 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2896 {
2897 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2898 struct hci_conn *conn;
2899
2900 BT_DBG("%s status %d", hdev->name, ev->status);
2901
2902 hci_dev_lock(hdev);
2903
2904 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2905 if (!conn) {
2906 if (ev->link_type == ESCO_LINK)
2907 goto unlock;
2908
2909 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2910 if (!conn)
2911 goto unlock;
2912
2913 conn->type = SCO_LINK;
2914 }
2915
2916 switch (ev->status) {
2917 case 0x00:
2918 conn->handle = __le16_to_cpu(ev->handle);
2919 conn->state = BT_CONNECTED;
2920
2921 hci_conn_hold_device(conn);
2922 hci_conn_add_sysfs(conn);
2923 break;
2924
2925 case 0x11: /* Unsupported Feature or Parameter Value */
2926 case 0x1c: /* SCO interval rejected */
2927 case 0x1a: /* Unsupported Remote Feature */
2928 case 0x1f: /* Unspecified error */
2929 if (conn->out && conn->attempt < 2) {
2930 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2931 (hdev->esco_type & EDR_ESCO_MASK);
2932 hci_setup_sync(conn, conn->link->handle);
2933 goto unlock;
2934 }
2935 /* fall through */
2936
2937 default:
2938 conn->state = BT_CLOSED;
2939 break;
2940 }
2941
2942 hci_proto_connect_cfm(conn, ev->status);
2943 if (ev->status)
2944 hci_conn_del(conn);
2945
2946 unlock:
2947 hci_dev_unlock(hdev);
2948 }
2949
2950 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2951 {
2952 BT_DBG("%s", hdev->name);
2953 }
2954
2955 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2956 {
2957 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2958
2959 BT_DBG("%s status %d", hdev->name, ev->status);
2960 }
2961
2962 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2963 {
2964 struct inquiry_data data;
2965 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2966 int num_rsp = *((__u8 *) skb->data);
2967
2968 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2969
2970 if (!num_rsp)
2971 return;
2972
2973 hci_dev_lock(hdev);
2974
2975 for (; num_rsp; num_rsp--, info++) {
2976 bool name_known, ssp;
2977
2978 bacpy(&data.bdaddr, &info->bdaddr);
2979 data.pscan_rep_mode = info->pscan_rep_mode;
2980 data.pscan_period_mode = info->pscan_period_mode;
2981 data.pscan_mode = 0x00;
2982 memcpy(data.dev_class, info->dev_class, 3);
2983 data.clock_offset = info->clock_offset;
2984 data.rssi = info->rssi;
2985 data.ssp_mode = 0x01;
2986
2987 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2988 name_known = eir_has_data_type(info->data,
2989 sizeof(info->data),
2990 EIR_NAME_COMPLETE);
2991 else
2992 name_known = true;
2993
2994 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2995 &ssp);
2996 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2997 info->dev_class, info->rssi,
2998 !name_known, ssp, info->data,
2999 sizeof(info->data));
3000 }
3001
3002 hci_dev_unlock(hdev);
3003 }
3004
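/* Derive the authentication requirements to advertise in the IO
 * Capability Reply. Values follow the BR/EDR SSP definitions:
 * 0x00/0x01 no bonding (without/with MITM), 0x02/0x03 dedicated
 * bonding, 0x04/0x05 general bonding.
 */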
3005 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3006 {
3007 /* If remote requests dedicated bonding follow that lead */
3008 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3009 /* If both remote and local IO capabilities allow MITM
3010 * protection then require it, otherwise don't */
3011 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3012 return 0x02;
3013 else
3014 return 0x03;
3015 }
3016
3017 /* If remote requests no-bonding follow that lead */
3018 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3019 return conn->remote_auth | (conn->auth_type & 0x01);
3020
3021 return conn->auth_type;
3022 }
3023
3024 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3025 {
3026 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3027 struct hci_conn *conn;
3028
3029 BT_DBG("%s", hdev->name);
3030
3031 hci_dev_lock(hdev);
3032
3033 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3034 if (!conn)
3035 goto unlock;
3036
3037 hci_conn_hold(conn);
3038
3039 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3040 goto unlock;
3041
3042 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3043 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3044 struct hci_cp_io_capability_reply cp;
3045
3046 bacpy(&cp.bdaddr, &ev->bdaddr);
3047 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3048 * since KeyboardDisplay is not a valid BR/EDR value in the BT spec. */
3049 cp.capability = (conn->io_capability == 0x04) ?
3050 0x01 : conn->io_capability;
3051 conn->auth_type = hci_get_auth_req(conn);
3052 cp.authentication = conn->auth_type;
3053
3054 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3055 hci_find_remote_oob_data(hdev, &conn->dst))
3056 cp.oob_data = 0x01;
3057 else
3058 cp.oob_data = 0x00;
3059
3060 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3061 sizeof(cp), &cp);
3062 } else {
3063 struct hci_cp_io_capability_neg_reply cp;
3064
3065 bacpy(&cp.bdaddr, &ev->bdaddr);
3066 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3067
3068 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3069 sizeof(cp), &cp);
3070 }
3071
3072 unlock:
3073 hci_dev_unlock(hdev);
3074 }
3075
3076 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3077 {
3078 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3079 struct hci_conn *conn;
3080
3081 BT_DBG("%s", hdev->name);
3082
3083 hci_dev_lock(hdev);
3084
3085 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3086 if (!conn)
3087 goto unlock;
3088
3089 conn->remote_cap = ev->capability;
3090 conn->remote_auth = ev->authentication;
3091 if (ev->oob_data)
3092 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3093
3094 unlock:
3095 hci_dev_unlock(hdev);
3096 }
3097
3098 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3099 struct sk_buff *skb)
3100 {
3101 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3102 int loc_mitm, rem_mitm, confirm_hint = 0;
3103 struct hci_conn *conn;
3104
3105 BT_DBG("%s", hdev->name);
3106
3107 hci_dev_lock(hdev);
3108
3109 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3110 goto unlock;
3111
3112 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3113 if (!conn)
3114 goto unlock;
3115
3116 loc_mitm = (conn->auth_type & 0x01);
3117 rem_mitm = (conn->remote_auth & 0x01);
3118
3119 /* If we require MITM but the remote device can't provide that
3120 * (it has NoInputNoOutput) then reject the confirmation
3121 * request. The only exception is when we're dedicated bonding
3122 * initiators (connect_cfm_cb set) since then we always have the MITM
3123 * bit set. */
3124 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3125 BT_DBG("Rejecting request: remote device can't provide MITM");
3126 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3127 sizeof(ev->bdaddr), &ev->bdaddr);
3128 goto unlock;
3129 }
3130
3131 /* If neither side requires MITM protection, auto-accept */
3132 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3133 (!rem_mitm || conn->io_capability == 0x03)) {
3134
3135 /* If we're not the initiator, request authorization to
3136 * proceed from user space (mgmt_user_confirm with
3137 * confirm_hint set to 1). */
3138 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3139 BT_DBG("Confirming auto-accept as acceptor");
3140 confirm_hint = 1;
3141 goto confirm;
3142 }
3143
3144 BT_DBG("Auto-accept of user confirmation with %ums delay",
3145 hdev->auto_accept_delay);
3146
3147 if (hdev->auto_accept_delay > 0) {
3148 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3149 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3150 goto unlock;
3151 }
3152
3153 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3154 sizeof(ev->bdaddr), &ev->bdaddr);
3155 goto unlock;
3156 }
3157
3158 confirm:
3159 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3160 confirm_hint);
3161
3162 unlock:
3163 hci_dev_unlock(hdev);
3164 }
3165
3166 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3167 struct sk_buff *skb)
3168 {
3169 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3170
3171 BT_DBG("%s", hdev->name);
3172
3173 hci_dev_lock(hdev);
3174
3175 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3176 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3177
3178 hci_dev_unlock(hdev);
3179 }
3180
3181 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3182 {
3183 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3184 struct hci_conn *conn;
3185
3186 BT_DBG("%s", hdev->name);
3187
3188 hci_dev_lock(hdev);
3189
3190 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3191 if (!conn)
3192 goto unlock;
3193
3194 /* To avoid duplicate auth_failed events to user space we check
3195 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3196 * initiated the authentication. A traditional auth_complete
3197 * event is always produced when we are the initiator and is
3198 * already mapped to the mgmt_auth_failed event. */
3199 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3200 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3201 ev->status);
3202
3203 hci_conn_put(conn);
3204
3205 unlock:
3206 hci_dev_unlock(hdev);
3207 }
3208
3209 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3210 {
3211 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3212 struct inquiry_entry *ie;
3213
3214 BT_DBG("%s", hdev->name);
3215
3216 hci_dev_lock(hdev);
3217
3218 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3219 if (ie)
3220 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3221
3222 hci_dev_unlock(hdev);
3223 }
3224
3225 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3226 struct sk_buff *skb)
3227 {
3228 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3229 struct oob_data *data;
3230
3231 BT_DBG("%s", hdev->name);
3232
3233 hci_dev_lock(hdev);
3234
3235 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3236 goto unlock;
3237
3238 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3239 if (data) {
3240 struct hci_cp_remote_oob_data_reply cp;
3241
3242 bacpy(&cp.bdaddr, &ev->bdaddr);
3243 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3244 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3245
3246 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3247 &cp);
3248 } else {
3249 struct hci_cp_remote_oob_data_neg_reply cp;
3250
3251 bacpy(&cp.bdaddr, &ev->bdaddr);
3252 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3253 &cp);
3254 }
3255
3256 unlock:
3257 hci_dev_unlock(hdev);
3258 }
3259
3260 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3261 {
3262 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3263 struct hci_conn *conn;
3264
3265 BT_DBG("%s status %d", hdev->name, ev->status);
3266
3267 hci_dev_lock(hdev);
3268
3269 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3270 if (!conn) {
3271 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3272 if (!conn) {
3273 BT_ERR("No memory for new connection");
3274 hci_dev_unlock(hdev);
3275 return;
3276 }
3277
3278 conn->dst_type = ev->bdaddr_type;
3279 }
3280
3281 if (ev->status) {
3282 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3283 conn->dst_type, ev->status);
3284 hci_proto_connect_cfm(conn, ev->status);
3285 conn->state = BT_CLOSED;
3286 hci_conn_del(conn);
3287 goto unlock;
3288 }
3289
3290 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3291 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3292 conn->dst_type, 0, NULL, 0, NULL);
3293
3294 conn->sec_level = BT_SECURITY_LOW;
3295 conn->handle = __le16_to_cpu(ev->handle);
3296 conn->state = BT_CONNECTED;
3297
3298 hci_conn_hold_device(conn);
3299 hci_conn_add_sysfs(conn);
3300
3301 hci_proto_connect_cfm(conn, ev->status);
3302
3303 unlock:
3304 hci_dev_unlock(hdev);
3305 }
3306
3307 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3308 struct sk_buff *skb)
3309 {
3310 u8 num_reports = skb->data[0];
3311 void *ptr = &skb->data[1];
3312 s8 rssi;
3313
3314 hci_dev_lock(hdev);
3315
3316 while (num_reports--) {
3317 struct hci_ev_le_advertising_info *ev = ptr;
3318
3319 hci_add_adv_entry(hdev, ev);
3320
3321 rssi = ev->data[ev->length];
3322 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3323 NULL, rssi, 0, 1, ev->data,
3324 ev->length);
3325
3326 ptr += sizeof(*ev) + ev->length + 1;
3327 }
3328
3329 hci_dev_unlock(hdev);
3330 }
3331
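/* LE Long Term Key Request event. Looks up the LTK by EDIV/Rand and
 * replies with it, raising the security level for authenticated keys;
 * STKs are single-use and are freed once handed to the controller.
 * Unknown keys get a negative reply.
 */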
3332 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3333 struct sk_buff *skb)
3334 {
3335 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3336 struct hci_cp_le_ltk_reply cp;
3337 struct hci_cp_le_ltk_neg_reply neg;
3338 struct hci_conn *conn;
3339 struct smp_ltk *ltk;
3340
3341 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3342
3343 hci_dev_lock(hdev);
3344
3345 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3346 if (conn == NULL)
3347 goto not_found;
3348
3349 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3350 if (ltk == NULL)
3351 goto not_found;
3352
3353 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3354 cp.handle = cpu_to_le16(conn->handle);
3355
3356 if (ltk->authenticated)
3357 conn->sec_level = BT_SECURITY_HIGH;
3358
3359 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3360
3361 if (ltk->type & HCI_SMP_STK) {
3362 list_del(&ltk->list);
3363 kfree(ltk);
3364 }
3365
3366 hci_dev_unlock(hdev);
3367
3368 return;
3369
3370 not_found:
3371 neg.handle = ev->handle;
3372 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3373 hci_dev_unlock(hdev);
3374 }
3375
3376 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3377 {
3378 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3379
3380 skb_pull(skb, sizeof(*le_ev));
3381
3382 switch (le_ev->subevent) {
3383 case HCI_EV_LE_CONN_COMPLETE:
3384 hci_le_conn_complete_evt(hdev, skb);
3385 break;
3386
3387 case HCI_EV_LE_ADVERTISING_REPORT:
3388 hci_le_adv_report_evt(hdev, skb);
3389 break;
3390
3391 case HCI_EV_LE_LTK_REQ:
3392 hci_le_ltk_request_evt(hdev, skb);
3393 break;
3394
3395 default:
3396 break;
3397 }
3398 }
3399
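/* Main HCI event entry point: strips the event header, dispatches to
 * the handler for the event code, then frees the skb and updates the
 * RX statistics.
 */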
3400 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3401 {
3402 struct hci_event_hdr *hdr = (void *) skb->data;
3403 __u8 event = hdr->evt;
3404
3405 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3406
3407 switch (event) {
3408 case HCI_EV_INQUIRY_COMPLETE:
3409 hci_inquiry_complete_evt(hdev, skb);
3410 break;
3411
3412 case HCI_EV_INQUIRY_RESULT:
3413 hci_inquiry_result_evt(hdev, skb);
3414 break;
3415
3416 case HCI_EV_CONN_COMPLETE:
3417 hci_conn_complete_evt(hdev, skb);
3418 break;
3419
3420 case HCI_EV_CONN_REQUEST:
3421 hci_conn_request_evt(hdev, skb);
3422 break;
3423
3424 case HCI_EV_DISCONN_COMPLETE:
3425 hci_disconn_complete_evt(hdev, skb);
3426 break;
3427
3428 case HCI_EV_AUTH_COMPLETE:
3429 hci_auth_complete_evt(hdev, skb);
3430 break;
3431
3432 case HCI_EV_REMOTE_NAME:
3433 hci_remote_name_evt(hdev, skb);
3434 break;
3435
3436 case HCI_EV_ENCRYPT_CHANGE:
3437 hci_encrypt_change_evt(hdev, skb);
3438 break;
3439
3440 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3441 hci_change_link_key_complete_evt(hdev, skb);
3442 break;
3443
3444 case HCI_EV_REMOTE_FEATURES:
3445 hci_remote_features_evt(hdev, skb);
3446 break;
3447
3448 case HCI_EV_REMOTE_VERSION:
3449 hci_remote_version_evt(hdev, skb);
3450 break;
3451
3452 case HCI_EV_QOS_SETUP_COMPLETE:
3453 hci_qos_setup_complete_evt(hdev, skb);
3454 break;
3455
3456 case HCI_EV_CMD_COMPLETE:
3457 hci_cmd_complete_evt(hdev, skb);
3458 break;
3459
3460 case HCI_EV_CMD_STATUS:
3461 hci_cmd_status_evt(hdev, skb);
3462 break;
3463
3464 case HCI_EV_ROLE_CHANGE:
3465 hci_role_change_evt(hdev, skb);
3466 break;
3467
3468 case HCI_EV_NUM_COMP_PKTS:
3469 hci_num_comp_pkts_evt(hdev, skb);
3470 break;
3471
3472 case HCI_EV_MODE_CHANGE:
3473 hci_mode_change_evt(hdev, skb);
3474 break;
3475
3476 case HCI_EV_PIN_CODE_REQ:
3477 hci_pin_code_request_evt(hdev, skb);
3478 break;
3479
3480 case HCI_EV_LINK_KEY_REQ:
3481 hci_link_key_request_evt(hdev, skb);
3482 break;
3483
3484 case HCI_EV_LINK_KEY_NOTIFY:
3485 hci_link_key_notify_evt(hdev, skb);
3486 break;
3487
3488 case HCI_EV_CLOCK_OFFSET:
3489 hci_clock_offset_evt(hdev, skb);
3490 break;
3491
3492 case HCI_EV_PKT_TYPE_CHANGE:
3493 hci_pkt_type_change_evt(hdev, skb);
3494 break;
3495
3496 case HCI_EV_PSCAN_REP_MODE:
3497 hci_pscan_rep_mode_evt(hdev, skb);
3498 break;
3499
3500 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3501 hci_inquiry_result_with_rssi_evt(hdev, skb);
3502 break;
3503
3504 case HCI_EV_REMOTE_EXT_FEATURES:
3505 hci_remote_ext_features_evt(hdev, skb);
3506 break;
3507
3508 case HCI_EV_SYNC_CONN_COMPLETE:
3509 hci_sync_conn_complete_evt(hdev, skb);
3510 break;
3511
3512 case HCI_EV_SYNC_CONN_CHANGED:
3513 hci_sync_conn_changed_evt(hdev, skb);
3514 break;
3515
3516 case HCI_EV_SNIFF_SUBRATE:
3517 hci_sniff_subrate_evt(hdev, skb);
3518 break;
3519
3520 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3521 hci_extended_inquiry_result_evt(hdev, skb);
3522 break;
3523
3524 case HCI_EV_IO_CAPA_REQUEST:
3525 hci_io_capa_request_evt(hdev, skb);
3526 break;
3527
3528 case HCI_EV_IO_CAPA_REPLY:
3529 hci_io_capa_reply_evt(hdev, skb);
3530 break;
3531
3532 case HCI_EV_USER_CONFIRM_REQUEST:
3533 hci_user_confirm_request_evt(hdev, skb);
3534 break;
3535
3536 case HCI_EV_USER_PASSKEY_REQUEST:
3537 hci_user_passkey_request_evt(hdev, skb);
3538 break;
3539
3540 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3541 hci_simple_pair_complete_evt(hdev, skb);
3542 break;
3543
3544 case HCI_EV_REMOTE_HOST_FEATURES:
3545 hci_remote_host_features_evt(hdev, skb);
3546 break;
3547
3548 case HCI_EV_LE_META:
3549 hci_le_meta_evt(hdev, skb);
3550 break;
3551
3552 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3553 hci_remote_oob_data_request_evt(hdev, skb);
3554 break;
3555
3556 case HCI_EV_NUM_COMP_BLOCKS:
3557 hci_num_comp_blocks_evt(hdev, skb);
3558 break;
3559
3560 default:
3561 BT_DBG("%s event 0x%x", hdev->name, event);
3562 break;
3563 }
3564
3565 kfree_skb(skb);
3566 hdev->stat.evt_rx++;
3567 }