Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On failure, report the failed stop-discovery attempt to the mgmt
 * layer. On success, clear the inquiry flag, move discovery to the
 * STOPPED state, complete the pending HCI_OP_INQUIRY request and
 * kick off any connection attempts that were deferred while the
 * inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Complete the original inquiry request as if it finished */
	hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);
}
60
/* Command Complete handler for HCI_OP_PERIODIC_INQ: on success mark
 * the device as running a periodic inquiry.
 */
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}
72
/* Command Complete handler for HCI_OP_EXIT_PERIODIC_INQ: on success
 * clear the periodic-inquiry flag and resume deferred connection
 * attempts.
 */
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}
86
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * Nothing to do beyond tracing; the Remote Name Request Complete
 * event carries the actual result.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY: record the
 * current role of the connection. role == 0x00 means master per the
 * HCI spec, so a non-zero role clears the master bit.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}
115
/* Command Complete handler for HCI_OP_READ_LINK_POLICY: cache the
 * per-connection link policy settings reported by the controller.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
134
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY: on success,
 * mirror the policy we asked for into the connection object. The
 * value is recovered from the sent command, which holds the handle
 * (2 bytes) followed by the policy (hence the +2 offset).
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* skip the 2-byte handle at the start of the command */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
158
/* Command Complete handler for HCI_OP_READ_DEF_LINK_POLICY: cache the
 * controller's default link policy.
 */
static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}
171
/* Command Complete handler for HCI_OP_WRITE_DEF_LINK_POLICY: on
 * success, record the default link policy we just wrote, taken from
 * the echoed command parameters.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);
}
187
/* Command Complete handler for HCI_OP_RESET: bring the local state
 * back to post-reset defaults. Note the status byte is only logged;
 * the reset bookkeeping is done unconditionally.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising data does not survive a reset */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;
}
206
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When mgmt is in use, mgmt gets notified (success or failure) and
 * owns the cached name; otherwise we update dev_name ourselves on
 * success from the echoed command payload.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
227
/* Command Complete handler for HCI_OP_READ_LOCAL_NAME: only cache the
 * name during initial setup; at other times the mgmt layer is the
 * authority on the local name.
 */
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
240
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE: sync the
 * HCI_AUTH flag with the parameter we sent, then let mgmt know the
 * outcome (including failures).
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
264
/* Command Complete handler for HCI_OP_WRITE_ENCRYPT_MODE: sync the
 * HCI_ENCRYPT flag with the mode we requested, on success only.
 */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}
}
285
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Syncs the HCI_ISCAN/HCI_PSCAN flags with the scan mode we asked
 * for and emits mgmt discoverable/connectable events only on actual
 * transitions (old_* capture the previous flag state). When inquiry
 * scan was enabled with a discoverable timeout configured, the
 * timeout worker is (re)armed here.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* test_and_clear so we both read the old state and reset it */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		if (hdev->discov_timeout > 0) {
			/* discov_timeout is in seconds */
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
333
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV: cache the
 * 3-byte class of device (stored little-endian, printed MSB first).
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
348
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV: on success
 * update the cached class from the echoed command, then notify mgmt
 * of the result either way.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
370
/* Command Complete handler for HCI_OP_READ_VOICE_SETTING: cache the
 * voice setting and notify the driver only when it actually changed.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
393
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING: on
 * success, mirror the setting we sent into the cache and notify the
 * driver when it changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
422
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, sync the SSP host feature bit with the requested mode.
 * The mgmt layer is notified regardless of status; without mgmt the
 * HCI_SSP_ENABLED flag is updated directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->host_features[0] |= LMP_HOST_SSP;
		else
			hdev->host_features[0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
450
/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION: cache the
 * controller's HCI/LMP version and manufacturer information.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
}
469
470 static void hci_cc_read_local_commands(struct hci_dev *hdev,
471 struct sk_buff *skb)
472 {
473 struct hci_rp_read_local_commands *rp = (void *) skb->data;
474
475 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
476
477 if (!rp->status)
478 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
479 }
480
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches the LMP feature page 0 bits and derives the usable ACL
 * packet types and (e)SCO link types from them.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* EV3 is implied by eSCO support */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	/* 3-slot EDR eSCO enables both 2 Mbps and 3 Mbps EV5 variants */
	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}
536
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES: cache
 * the requested feature page. Page 0 mirrors the basic features,
 * page 1 holds the host features; other pages are ignored here.
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	switch (rp->page) {
	case 0:
		memcpy(hdev->features, rp->features, 8);
		break;
	case 1:
		memcpy(hdev->host_features, rp->features, 8);
		break;
	}
}
556
557 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
558 struct sk_buff *skb)
559 {
560 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
561
562 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
563
564 if (!rp->status)
565 hdev->flow_ctl_mode = rp->mode;
566 }
567
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Caches the controller's ACL/SCO MTUs and packet counts and
 * initializes the available-credit counters from them. Controllers
 * with the FIXUP_BUFFER_SIZE quirk report bogus SCO values, so those
 * are overridden with safe defaults.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
593
594 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
595 {
596 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
597
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599
600 if (!rp->status)
601 bacpy(&hdev->bdaddr, &rp->bdaddr);
602 }
603
604 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
605 struct sk_buff *skb)
606 {
607 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
608
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610
611 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
612 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
613 hdev->page_scan_window = __le16_to_cpu(rp->window);
614 }
615 }
616
/* Command Complete handler for HCI_OP_WRITE_PAGE_SCAN_ACTIVITY: on
 * success, cache the interval/window we requested, read back from the
 * echoed command parameters.
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}
635
636 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
637 struct sk_buff *skb)
638 {
639 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
640
641 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
642
643 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
644 hdev->page_scan_type = rp->type;
645 }
646
647 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
648 struct sk_buff *skb)
649 {
650 u8 status = *((u8 *) skb->data);
651 u8 *type;
652
653 BT_DBG("%s status 0x%2.2x", hdev->name, status);
654
655 if (status)
656 return;
657
658 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
659 if (type)
660 hdev->page_scan_type = *type;
661 }
662
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE: cache the
 * block-based flow control parameters and initialize the free block
 * counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
682
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO: cache the
 * local AMP controller capabilities. The A2MP Get Info response is
 * sent in both the success and failure cases.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
707
708 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
709 struct sk_buff *skb)
710 {
711 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
712 struct amp_assoc *assoc = &hdev->loc_assoc;
713 size_t rem_len, frag_len;
714
715 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716
717 if (rp->status)
718 goto a2mp_rsp;
719
720 frag_len = skb->len - sizeof(*rp);
721 rem_len = __le16_to_cpu(rp->rem_len);
722
723 if (rem_len > frag_len) {
724 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
725
726 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
727 assoc->offset += frag_len;
728
729 /* Read other fragments */
730 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
731
732 return;
733 }
734
735 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
736 assoc->len = assoc->offset + rem_len;
737 assoc->offset = 0;
738
739 a2mp_rsp:
740 /* Send A2MP Rsp when all fragments are received */
741 a2mp_send_getampassoc_rsp(hdev, rp->status);
742 a2mp_send_create_phy_link_req(hdev, rp->status);
743 }
744
745 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
746 struct sk_buff *skb)
747 {
748 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
749
750 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
751
752 if (!rp->status)
753 hdev->inq_tx_power = rp->tx_power;
754 }
755
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY: notify mgmt of
 * the result and, on success, record the PIN length on the matching
 * ACL connection (needed later for link key type decisions).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
783
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY: forward the
 * result to the mgmt layer.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
798
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE: cache the
 * LE ACL MTU and packet count and initialize the LE credit counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
816
817 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
818 struct sk_buff *skb)
819 {
820 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
821
822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824 if (!rp->status)
825 memcpy(hdev->le_features, rp->features, 8);
826 }
827
828 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
829 struct sk_buff *skb)
830 {
831 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
832
833 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834
835 if (!rp->status)
836 hdev->adv_tx_power = rp->tx_power;
837 }
838
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY: forward the
 * result to mgmt (BR/EDR, hence ACL_LINK and address type 0).
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
853
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY: forward
 * the result to mgmt.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
869
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY: forward the
 * result to mgmt. The reply layout matches user-confirm, so the same
 * rp struct is reused.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
884
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY: forward
 * the result to mgmt.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
900
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA: hand the
 * hash/randomizer pair (and status) to the mgmt layer.
 */
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}
913
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * On success, sync the HCI_LE_PERIPHERAL flag with the requested
 * advertising state. Outside of controller init, also refresh the
 * advertising data, since it depends on the advertising state.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	if (!test_bit(HCI_INIT, &hdev->flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_update_ad(&req);
		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);
}
943
944 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
945 {
946 __u8 status = *((__u8 *) skb->data);
947
948 BT_DBG("%s status 0x%2.2x", hdev->name, status);
949
950 if (status) {
951 hci_dev_lock(hdev);
952 mgmt_start_discovery_failed(hdev, status);
953 hci_dev_unlock(hdev);
954 return;
955 }
956 }
957
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Syncs the HCI_LE_SCAN flag and the discovery state machine with
 * the enable/disable we sent. When an interleaved (BR/EDR + LE)
 * discovery finishes its LE phase, control is handed back to mgmt to
 * start the BR/EDR phase instead of stopping discovery.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			/* LE phase done; switch to the BR/EDR phase */
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1012
1013 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1014 struct sk_buff *skb)
1015 {
1016 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1017
1018 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1019
1020 if (!rp->status)
1021 hdev->le_white_list_size = rp->size;
1022 }
1023
1024 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1025 struct sk_buff *skb)
1026 {
1027 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030
1031 if (!rp->status)
1032 memcpy(hdev->le_states, rp->le_states, 8);
1033 }
1034
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED: on
 * success, sync the LE and simultaneous LE/BR-EDR host feature bits
 * with what we requested. mgmt is notified of the outcome except
 * during controller init.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->host_features[0] |= LMP_HOST_LE_BREDR;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);
}
1063
/* Command Complete handler for HCI_OP_WRITE_REMOTE_AMP_ASSOC: on
 * success, continue writing the next chunk of the remote AMP assoc
 * for the given physical link.
 */
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
1077
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure, resume deferred connections and report the failed
 * discovery start to mgmt. On success, mark the inquiry in progress
 * and move discovery to the FINDING state.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
1097
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure: tear down the pending connection object, except for
 * status 0x0c (Command Disallowed) on the first attempts, where the
 * connection is parked in BT_CONNECT2 for a retry. On success: make
 * sure a connection object exists for the address (e.g. when the
 * command was issued from raw/user context) and mark it outgoing.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: retry up to twice */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1137
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures matter here: find the parent ACL connection by
 * handle and tear down the SCO connection linked to it.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1172
/* Command Status handler for HCI_OP_AUTH_REQUESTED.
 *
 * Only failures matter: if authentication was requested while the
 * connection was still being configured, complete the connect
 * attempt with the error and drop the reference taken for the
 * authentication request.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1199
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.
 *
 * Only failures matter: if encryption was requested while the
 * connection was still in BT_CONFIG, complete the connect attempt
 * with the error and drop the held reference. Mirrors
 * hci_cs_auth_requested().
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1226
1227 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1228 struct hci_conn *conn)
1229 {
1230 if (conn->state != BT_CONFIG || !conn->out)
1231 return 0;
1232
1233 if (conn->pending_sec_level == BT_SECURITY_SDP)
1234 return 0;
1235
1236 /* Only request authentication for SSP connections or non-SSP
1237 * devices with sec_level HIGH or if MITM protection is requested */
1238 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1239 conn->pending_sec_level != BT_SECURITY_HIGH)
1240 return 0;
1241
1242 return 1;
1243 }
1244
1245 static int hci_resolve_name(struct hci_dev *hdev,
1246 struct inquiry_entry *e)
1247 {
1248 struct hci_cp_remote_name_req cp;
1249
1250 memset(&cp, 0, sizeof(cp));
1251
1252 bacpy(&cp.bdaddr, &e->data.bdaddr);
1253 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1254 cp.pscan_mode = e->data.pscan_mode;
1255 cp.clock_offset = e->data.clock_offset;
1256
1257 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1258 }
1259
1260 static bool hci_resolve_next_name(struct hci_dev *hdev)
1261 {
1262 struct discovery_state *discov = &hdev->discovery;
1263 struct inquiry_entry *e;
1264
1265 if (list_empty(&discov->resolve))
1266 return false;
1267
1268 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1269 if (!e)
1270 return false;
1271
1272 if (hci_resolve_name(hdev, e) == 0) {
1273 e->name_state = NAME_PENDING;
1274 return true;
1275 }
1276
1277 return false;
1278 }
1279
/* Deliver a resolved (or failed) remote name to mgmt and drive the
 * name-resolution phase of discovery forward.
 *
 * Called from the Remote Name Request Complete event and from the
 * command status handler when the request could not be sent.  @name is
 * NULL when no name was obtained; @conn may be NULL.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First mgmt "connected" report for this link: include whatever
	 * name information we have. */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested while a name was still pending: finish
	 * discovery now. */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of
	 * which are pending, there is no need to continue resolving a next
	 * name as it will be done upon receiving another Remote Name Request
	 * Complete Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Another entry is now being resolved; discovery stays active */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1322
1323 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1324 {
1325 struct hci_cp_remote_name_req *cp;
1326 struct hci_conn *conn;
1327
1328 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1329
1330 /* If successful wait for the name req complete event before
1331 * checking for the need to do authentication */
1332 if (!status)
1333 return;
1334
1335 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1336 if (!cp)
1337 return;
1338
1339 hci_dev_lock(hdev);
1340
1341 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1342
1343 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1344 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1345
1346 if (!conn)
1347 goto unlock;
1348
1349 if (!hci_outgoing_auth_needed(hdev, conn))
1350 goto unlock;
1351
1352 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1353 struct hci_cp_auth_requested cp;
1354 cp.handle = __cpu_to_le16(conn->handle);
1355 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1356 }
1357
1358 unlock:
1359 hci_dev_unlock(hdev);
1360 }
1361
1362 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1363 {
1364 struct hci_cp_read_remote_features *cp;
1365 struct hci_conn *conn;
1366
1367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1368
1369 if (!status)
1370 return;
1371
1372 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1373 if (!cp)
1374 return;
1375
1376 hci_dev_lock(hdev);
1377
1378 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1379 if (conn) {
1380 if (conn->state == BT_CONFIG) {
1381 hci_proto_connect_cfm(conn, status);
1382 hci_conn_put(conn);
1383 }
1384 }
1385
1386 hci_dev_unlock(hdev);
1387 }
1388
1389 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1390 {
1391 struct hci_cp_read_remote_ext_features *cp;
1392 struct hci_conn *conn;
1393
1394 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1395
1396 if (!status)
1397 return;
1398
1399 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1400 if (!cp)
1401 return;
1402
1403 hci_dev_lock(hdev);
1404
1405 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1406 if (conn) {
1407 if (conn->state == BT_CONFIG) {
1408 hci_proto_connect_cfm(conn, status);
1409 hci_conn_put(conn);
1410 }
1411 }
1412
1413 hci_dev_unlock(hdev);
1414 }
1415
1416 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1417 {
1418 struct hci_cp_setup_sync_conn *cp;
1419 struct hci_conn *acl, *sco;
1420 __u16 handle;
1421
1422 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1423
1424 if (!status)
1425 return;
1426
1427 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1428 if (!cp)
1429 return;
1430
1431 handle = __le16_to_cpu(cp->handle);
1432
1433 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1434
1435 hci_dev_lock(hdev);
1436
1437 acl = hci_conn_hash_lookup_handle(hdev, handle);
1438 if (acl) {
1439 sco = acl->link;
1440 if (sco) {
1441 sco->state = BT_CLOSED;
1442
1443 hci_proto_connect_cfm(sco, status);
1444 hci_conn_del(sco);
1445 }
1446 }
1447
1448 hci_dev_unlock(hdev);
1449 }
1450
1451 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1452 {
1453 struct hci_cp_sniff_mode *cp;
1454 struct hci_conn *conn;
1455
1456 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1457
1458 if (!status)
1459 return;
1460
1461 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1462 if (!cp)
1463 return;
1464
1465 hci_dev_lock(hdev);
1466
1467 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1468 if (conn) {
1469 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1470
1471 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1472 hci_sco_setup(conn, status);
1473 }
1474
1475 hci_dev_unlock(hdev);
1476 }
1477
1478 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1479 {
1480 struct hci_cp_exit_sniff_mode *cp;
1481 struct hci_conn *conn;
1482
1483 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1484
1485 if (!status)
1486 return;
1487
1488 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1489 if (!cp)
1490 return;
1491
1492 hci_dev_lock(hdev);
1493
1494 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1495 if (conn) {
1496 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1497
1498 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1499 hci_sco_setup(conn, status);
1500 }
1501
1502 hci_dev_unlock(hdev);
1503 }
1504
1505 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1506 {
1507 struct hci_cp_disconnect *cp;
1508 struct hci_conn *conn;
1509
1510 if (!status)
1511 return;
1512
1513 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1514 if (!cp)
1515 return;
1516
1517 hci_dev_lock(hdev);
1518
1519 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1520 if (conn)
1521 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1522 conn->dst_type, status);
1523
1524 hci_dev_unlock(hdev);
1525 }
1526
1527 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1528 {
1529 struct hci_conn *conn;
1530
1531 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1532
1533 if (status) {
1534 hci_dev_lock(hdev);
1535
1536 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1537 if (!conn) {
1538 hci_dev_unlock(hdev);
1539 return;
1540 }
1541
1542 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1543
1544 conn->state = BT_CLOSED;
1545 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1546 conn->dst_type, status);
1547 hci_proto_connect_cfm(conn, status);
1548 hci_conn_del(conn);
1549
1550 hci_dev_unlock(hdev);
1551 }
1552 }
1553
1554 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1555 {
1556 struct hci_cp_create_phy_link *cp;
1557
1558 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1559
1560 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1561 if (!cp)
1562 return;
1563
1564 hci_dev_lock(hdev);
1565
1566 if (status) {
1567 struct hci_conn *hcon;
1568
1569 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1570 if (hcon)
1571 hci_conn_del(hcon);
1572 } else {
1573 amp_write_remote_assoc(hdev, cp->phy_handle);
1574 }
1575
1576 hci_dev_unlock(hdev);
1577 }
1578
1579 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1580 {
1581 struct hci_cp_accept_phy_link *cp;
1582
1583 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1584
1585 if (status)
1586 return;
1587
1588 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1589 if (!cp)
1590 return;
1591
1592 amp_write_remote_assoc(hdev, cp->phy_handle);
1593 }
1594
/* Inquiry Complete event: the inquiry phase of discovery has ended.
 *
 * Completes the pending HCI_OP_INQUIRY request, kicks queued connection
 * attempts and, when mgmt-driven discovery is active, either moves on
 * to the name-resolution phase or stops discovery.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);

	/* Nothing to do if no inquiry was actually running */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Legacy (non-mgmt) users drive inquiry themselves */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names for discovered devices that need one */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1634
1635 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1636 {
1637 struct inquiry_data data;
1638 struct inquiry_info *info = (void *) (skb->data + 1);
1639 int num_rsp = *((__u8 *) skb->data);
1640
1641 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1642
1643 if (!num_rsp)
1644 return;
1645
1646 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1647 return;
1648
1649 hci_dev_lock(hdev);
1650
1651 for (; num_rsp; num_rsp--, info++) {
1652 bool name_known, ssp;
1653
1654 bacpy(&data.bdaddr, &info->bdaddr);
1655 data.pscan_rep_mode = info->pscan_rep_mode;
1656 data.pscan_period_mode = info->pscan_period_mode;
1657 data.pscan_mode = info->pscan_mode;
1658 memcpy(data.dev_class, info->dev_class, 3);
1659 data.clock_offset = info->clock_offset;
1660 data.rssi = 0x00;
1661 data.ssp_mode = 0x00;
1662
1663 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1664 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1665 info->dev_class, 0, !name_known, ssp, NULL,
1666 0);
1667 }
1668
1669 hci_dev_unlock(hdev);
1670 }
1671
/* Connection Complete event: an ACL/SCO connection attempt finished.
 *
 * On success the connection object is moved to BT_CONFIG (ACL) or
 * BT_CONNECTED (SCO), remote features are requested and mgmt/protocol
 * layers are notified.  On failure the connection is torn down.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* A SCO completion may belong to a connection that was
		 * requested as eSCO; downgrade its type to match. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy links without a stored key get a
			 * longer timeout to allow pairing to finish. */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	/* Trigger any SCO setup that was waiting on this ACL link */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1754
1755 void hci_conn_accept(struct hci_conn *conn, int mask)
1756 {
1757 struct hci_dev *hdev = conn->hdev;
1758
1759 BT_DBG("conn %p", conn);
1760
1761 conn->state = BT_CONFIG;
1762
1763 if (!lmp_esco_capable(hdev)) {
1764 struct hci_cp_accept_conn_req cp;
1765
1766 bacpy(&cp.bdaddr, &conn->dst);
1767
1768 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1769 cp.role = 0x00; /* Become master */
1770 else
1771 cp.role = 0x01; /* Remain slave */
1772
1773 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
1774 } else /* lmp_esco_capable(hdev)) */ {
1775 struct hci_cp_accept_sync_conn_req cp;
1776
1777 bacpy(&cp.bdaddr, &conn->dst);
1778 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1779
1780 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1781 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1782 cp.max_latency = __constant_cpu_to_le16(0xffff);
1783 cp.content_format = cpu_to_le16(hdev->voice_setting);
1784 cp.retrans_effort = 0xff;
1785
1786 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1787 sizeof(cp), &cp);
1788 }
1789 }
1790
/* Connection Request event: a remote device wants to connect.
 *
 * Consults the registered protocols and the blacklist, then either
 * accepts (immediately or deferred to user space) or rejects the
 * request.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocols vote on acceptance and deferral */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Keep the inquiry cache's class of device up to date */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		/* ACL requests (or SCO on pre-eSCO controllers) are
		 * accepted with the basic Accept Connection Request. */
		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Deferred: user space decides via hci_conn_accept() */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
			hci_conn_put(conn);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1873
1874 static u8 hci_to_mgmt_reason(u8 err)
1875 {
1876 switch (err) {
1877 case HCI_ERROR_CONNECTION_TIMEOUT:
1878 return MGMT_DEV_DISCONN_TIMEOUT;
1879 case HCI_ERROR_REMOTE_USER_TERM:
1880 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1881 case HCI_ERROR_REMOTE_POWER_OFF:
1882 return MGMT_DEV_DISCONN_REMOTE;
1883 case HCI_ERROR_LOCAL_HOST_TERM:
1884 return MGMT_DEV_DISCONN_LOCAL_HOST;
1885 default:
1886 return MGMT_DEV_DISCONN_UNKNOWN;
1887 }
1888 }
1889
/* Disconnection Complete event: a link went down (or a disconnect
 * attempt failed, signalled by a non-zero status).
 *
 * Reports the result to mgmt for ACL/LE links, removes the stored link
 * key when requested, and deletes the connection object on success.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only report to mgmt if the connection had been reported as
	 * connected, and only for ACL and LE link types. */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		/* flush_key means the key was only good for this session */
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1929
/* Authentication Complete event.
 *
 * Updates link mode and security level on success, reports failures to
 * mgmt, and continues the connection setup: for SSP links in BT_CONFIG
 * encryption is enabled next; otherwise the connection completes or
 * the auth result is confirmed to waiting users.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy devices cannot be re-authenticated; a "success"
		 * for a pending re-auth must not raise the sec level. */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before the link is usable */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	/* An encryption change was requested while auth was pending */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1995
/* Remote Name Request Complete event.
 *
 * Feeds the resolved name (or the failure) into the pending-name
 * discovery machinery and, if the connection still needs it, requests
 * authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		/* NULL name signals the lookup failed */
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2034
/* Encryption Change event.
 *
 * Tracks encryption (and implied authentication) in the link mode,
 * disconnects on encryption failure of an established link, and
 * completes connection setup for links still in BT_CONFIG.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* A failed encryption change on a live link is fatal */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_put(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2077
2078 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2079 struct sk_buff *skb)
2080 {
2081 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2082 struct hci_conn *conn;
2083
2084 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2085
2086 hci_dev_lock(hdev);
2087
2088 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2089 if (conn) {
2090 if (!ev->status)
2091 conn->link_mode |= HCI_LM_SECURE;
2092
2093 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2094
2095 hci_key_change_cfm(conn, ev->status);
2096 }
2097
2098 hci_dev_unlock(hdev);
2099 }
2100
/* Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page and continues connection setup:
 * request the extended feature page for SSP-capable peers, otherwise
 * resolve the remote name (or report the connection to mgmt) and
 * finish setup unless authentication is still needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* SSP peers: fetch extended features (page 1) before continuing */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the connection, or
	 * report it to mgmt right away if already connected once. */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2150
/* Command Complete event: dispatch to the per-opcode hci_cc_* handler.
 *
 * Afterwards the command timer is cancelled, the request framework is
 * notified, and command sending is re-enabled according to the ncmd
 * credit the controller reported.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP completions are spontaneous; no command is pending */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Re-enable command sending if the controller granted credit */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2386
/* Command Status event: dispatch to the per-opcode hci_cs_* handler.
 *
 * Afterwards the command timer is cancelled, the request framework is
 * notified, and command sending is re-enabled according to the ncmd
 * credit the controller reported.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP statuses are spontaneous; no command is pending */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_status(hdev, opcode, ev->status);

	/* Re-enable command sending if the controller granted credit */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2473
2474 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2475 {
2476 struct hci_ev_role_change *ev = (void *) skb->data;
2477 struct hci_conn *conn;
2478
2479 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2480
2481 hci_dev_lock(hdev);
2482
2483 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2484 if (conn) {
2485 if (!ev->status) {
2486 if (ev->role)
2487 conn->link_mode &= ~HCI_LM_MASTER;
2488 else
2489 conn->link_mode |= HCI_LM_MASTER;
2490 }
2491
2492 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2493
2494 hci_role_switch_cfm(conn, ev->status, ev->role);
2495 }
2496
2497 hci_dev_unlock(hdev);
2498 }
2499
/* Handle the Number Of Completed Packets event (packet-based flow
 * control): for each reported handle, credit the completed count back
 * to the per-link-type transmit budget and restart the TX work.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the header first, then that all num_hndl entries fit */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return credits, clamped to the controller-reported
		 * buffer totals so a buggy controller cannot inflate them.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE shares the ACL buffers when the controller
			 * advertises no dedicated LE buffers (le_pkts == 0).
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2565
2566 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2567 __u16 handle)
2568 {
2569 struct hci_chan *chan;
2570
2571 switch (hdev->dev_type) {
2572 case HCI_BREDR:
2573 return hci_conn_hash_lookup_handle(hdev, handle);
2574 case HCI_AMP:
2575 chan = hci_chan_lookup_handle(hdev, handle);
2576 if (chan)
2577 return chan->conn;
2578 break;
2579 default:
2580 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2581 break;
2582 }
2583
2584 return NULL;
2585 }
2586
/* Handle the Number Of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers): credit completed blocks back to
 * the shared block budget and restart the TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the header first, then that all num_hndl entries fit */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may be a BR/EDR connection or an AMP logical link */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp to the controller-reported block total */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2636
/* Handle the HCI Mode Change event (active/sniff/hold transitions):
 * record the new mode and interval, maintain the power-save flag, and
 * trigger any SCO setup that was deferred until the mode settled.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		/* Only track power-save state for mode changes we did not
		 * initiate ourselves (no MODE_CHANGE_PEND set).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
2665
/* Handle the HCI PIN Code Request event: reject it outright when the
 * device is not pairable, otherwise forward the request to user space
 * via mgmt (when mgmt is in control).
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* Tell user space whether a 16-digit PIN is required */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2702
/* Handle the HCI Link Key Request event: look up a stored key for the
 * peer, apply security policy (debug keys, unauthenticated keys, short
 * PINs vs. high security), and answer with a Link Key Reply or a
 * negative reply when no acceptable key exists.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only active when mgmt loaded link keys */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable when explicitly enabled */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Reject an unauthenticated key when the pending auth
		 * requirement demands MITM protection.
		 */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN (<16 digits) is not
		 * good enough for high security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2765
/* Handle the HCI Link Key Notification event: cache the new key's type
 * on the connection and store the key when mgmt-managed key storage is
 * enabled.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A "changed combination key" keeps the original type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2794
2795 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2796 {
2797 struct hci_ev_clock_offset *ev = (void *) skb->data;
2798 struct hci_conn *conn;
2799
2800 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2801
2802 hci_dev_lock(hdev);
2803
2804 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2805 if (conn && !ev->status) {
2806 struct inquiry_entry *ie;
2807
2808 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2809 if (ie) {
2810 ie->data.clock_offset = ev->clock_offset;
2811 ie->timestamp = jiffies;
2812 }
2813 }
2814
2815 hci_dev_unlock(hdev);
2816 }
2817
2818 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2819 {
2820 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2821 struct hci_conn *conn;
2822
2823 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2824
2825 hci_dev_lock(hdev);
2826
2827 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2828 if (conn && !ev->status)
2829 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2830
2831 hci_dev_unlock(hdev);
2832 }
2833
2834 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2835 {
2836 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2837 struct inquiry_entry *ie;
2838
2839 BT_DBG("%s", hdev->name);
2840
2841 hci_dev_lock(hdev);
2842
2843 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2844 if (ie) {
2845 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2846 ie->timestamp = jiffies;
2847 }
2848
2849 hci_dev_unlock(hdev);
2850 }
2851
/* Handle the Inquiry Result With RSSI event.
 *
 * Two wire formats exist: with and without a pscan_mode field. The
 * format is detected from the per-response size; each response is then
 * added to the inquiry cache and reported to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Per-response size tells the two event layouts apart */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2911
/* Handle the Read Remote Extended Features Complete event: record the
 * remote host's SSP support (page 1), then continue connection setup —
 * request the remote name, notify mgmt, and finish the connection when
 * no outgoing authentication is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Page 1 carries the host feature bits, including SSP support */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	/* Only continue setup for connections still in BT_CONFIG */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2960
/* Handle the Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success, finish setting up the connection; on a set of known
 * eSCO-incompatibility errors, retry once with SCO-compatible packet
 * types; anything else closes the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO attempt may have been downgraded to SCO by the
		 * controller; fall back to the pending eSCO entry.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry outgoing setup once with non-EDR packet types */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3016
/* Handle the Extended Inquiry Result event: parse each response, derive
 * whether the device name is already known from the EIR data, update
 * the inquiry cache and report the device (with its EIR data) to mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* With mgmt in control, a name request can be skipped when
		 * the EIR data already contains the complete name.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
3064
/* Handle the Encryption Key Refresh Complete event: promote the
 * pending security level on success, disconnect on authentication
 * failure, and confirm the connection or the authentication depending
 * on the current connection state.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link is treated as an
	 * authentication failure and terminates the connection.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_put(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3108
3109 static u8 hci_get_auth_req(struct hci_conn *conn)
3110 {
3111 /* If remote requests dedicated bonding follow that lead */
3112 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3113 /* If both remote and local IO capabilities allow MITM
3114 * protection then require it, otherwise don't */
3115 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3116 return 0x02;
3117 else
3118 return 0x03;
3119 }
3120
3121 /* If remote requests no-bonding follow that lead */
3122 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3123 return conn->remote_auth | (conn->auth_type & 0x01);
3124
3125 return conn->auth_type;
3126 }
3127
/* Handle the HCI IO Capability Request event: answer with our IO
 * capabilities when pairing is allowed (pairable, or remote only wants
 * no-bonding), otherwise send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive through the pairing exchange */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we have it and it is
		 * relevant for this direction of pairing.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3179
3180 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3181 {
3182 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3183 struct hci_conn *conn;
3184
3185 BT_DBG("%s", hdev->name);
3186
3187 hci_dev_lock(hdev);
3188
3189 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3190 if (!conn)
3191 goto unlock;
3192
3193 conn->remote_cap = ev->capability;
3194 conn->remote_auth = ev->authentication;
3195 if (ev->oob_data)
3196 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3197
3198 unlock:
3199 hci_dev_unlock(hdev);
3200 }
3201
/* Handle the HCI User Confirmation Request event.
 *
 * Depending on the local and remote MITM requirements and IO
 * capabilities this either rejects the request, auto-accepts it
 * (optionally after a configurable delay), or forwards it to user
 * space via mgmt for explicit confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement encodes the MITM requirement */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Delayed auto-accept: arm the per-connection timer */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3269
3270 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3271 struct sk_buff *skb)
3272 {
3273 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3274
3275 BT_DBG("%s", hdev->name);
3276
3277 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3278 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3279 }
3280
3281 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3282 struct sk_buff *skb)
3283 {
3284 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3285 struct hci_conn *conn;
3286
3287 BT_DBG("%s", hdev->name);
3288
3289 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3290 if (!conn)
3291 return;
3292
3293 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3294 conn->passkey_entered = 0;
3295
3296 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3297 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3298 conn->dst_type, conn->passkey_notify,
3299 conn->passkey_entered);
3300 }
3301
3302 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3303 {
3304 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3305 struct hci_conn *conn;
3306
3307 BT_DBG("%s", hdev->name);
3308
3309 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3310 if (!conn)
3311 return;
3312
3313 switch (ev->type) {
3314 case HCI_KEYPRESS_STARTED:
3315 conn->passkey_entered = 0;
3316 return;
3317
3318 case HCI_KEYPRESS_ENTERED:
3319 conn->passkey_entered++;
3320 break;
3321
3322 case HCI_KEYPRESS_ERASED:
3323 conn->passkey_entered--;
3324 break;
3325
3326 case HCI_KEYPRESS_CLEARED:
3327 conn->passkey_entered = 0;
3328 break;
3329
3330 case HCI_KEYPRESS_COMPLETED:
3331 return;
3332 }
3333
3334 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3335 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3336 conn->dst_type, conn->passkey_notify,
3337 conn->passkey_entered);
3338 }
3339
/* Handle the Simple Pairing Complete event: report a pairing failure
 * to mgmt for pairings we did not initiate, and release the reference
 * taken when pairing started.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3368
3369 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3370 struct sk_buff *skb)
3371 {
3372 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3373 struct inquiry_entry *ie;
3374
3375 BT_DBG("%s", hdev->name);
3376
3377 hci_dev_lock(hdev);
3378
3379 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3380 if (ie)
3381 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3382
3383 hci_dev_unlock(hdev);
3384 }
3385
/* Handle the Remote OOB Data Request event: reply with the stored
 * hash/randomizer for the peer when available, otherwise send a
 * negative reply.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only managed when mgmt is in control */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
			     &cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
			     &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3420
/* Handle the AMP Physical Link Complete event: on success, mark the
 * AMP connection as established (inheriting the peer address from the
 * underlying BR/EDR link) and hand it to the AMP manager; on error,
 * delete the pending connection.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The BR/EDR link that negotiated this AMP physical link */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_put(hcon);

	hci_conn_hold_device(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
3460
/* Handle the AMP Logical Link Complete event: create the hci_chan for
 * the new logical link and, when an L2CAP channel is waiting on the
 * AMP manager, confirm the logical link to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP data path uses the controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3498
3499 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3500 struct sk_buff *skb)
3501 {
3502 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3503 struct hci_chan *hchan;
3504
3505 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3506 le16_to_cpu(ev->handle), ev->status);
3507
3508 if (ev->status)
3509 return;
3510
3511 hci_dev_lock(hdev);
3512
3513 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3514 if (!hchan)
3515 goto unlock;
3516
3517 amp_destroy_logical_link(hchan, ev->reason);
3518
3519 unlock:
3520 hci_dev_unlock(hdev);
3521 }
3522
3523 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3524 struct sk_buff *skb)
3525 {
3526 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3527 struct hci_conn *hcon;
3528
3529 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3530
3531 if (ev->status)
3532 return;
3533
3534 hci_dev_lock(hdev);
3535
3536 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3537 if (hcon) {
3538 hcon->state = BT_CLOSED;
3539 hci_conn_del(hcon);
3540 }
3541
3542 hci_dev_unlock(hdev);
3543 }
3544
/* Handle the LE Connection Complete event: match it to a pending
 * outgoing attempt or create a connection object for an incoming one,
 * then either finish setup on success or report the failure and clean
 * up.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing attempt left an LE connection in BT_CONNECT;
	 * otherwise this is an incoming connection and a new object
	 * has to be created.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3595
3596 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3597 {
3598 u8 num_reports = skb->data[0];
3599 void *ptr = &skb->data[1];
3600 s8 rssi;
3601
3602 while (num_reports--) {
3603 struct hci_ev_le_advertising_info *ev = ptr;
3604
3605 rssi = ev->data[ev->length];
3606 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3607 NULL, rssi, 0, 1, ev->data, ev->length);
3608
3609 ptr += sizeof(*ev) + ev->length + 1;
3610 }
3611 }
3612
/* HCI LE Long Term Key Request event.
 *
 * The controller asks for the LTK matching the given connection, EDiv
 * and Rand. If a stored key is found it is sent back with an LTK Reply;
 * otherwise a Negative Reply is sent so the link stays unencrypted.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* An authenticated (MITM-protected) key upgrades the link's
	 * security level.
	 */
	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Short Term Keys are single-use pairing keys: discard after the
	 * first encryption request so they are never replayed.
	 */
	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No key available: reject so the remote can decide to re-pair */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
3655
3656 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3657 {
3658 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3659
3660 skb_pull(skb, sizeof(*le_ev));
3661
3662 switch (le_ev->subevent) {
3663 case HCI_EV_LE_CONN_COMPLETE:
3664 hci_le_conn_complete_evt(hdev, skb);
3665 break;
3666
3667 case HCI_EV_LE_ADVERTISING_REPORT:
3668 hci_le_adv_report_evt(hdev, skb);
3669 break;
3670
3671 case HCI_EV_LE_LTK_REQ:
3672 hci_le_ltk_request_evt(hdev, skb);
3673 break;
3674
3675 default:
3676 break;
3677 }
3678 }
3679
3680 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3681 {
3682 struct hci_ev_channel_selected *ev = (void *) skb->data;
3683 struct hci_conn *hcon;
3684
3685 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3686
3687 skb_pull(skb, sizeof(*ev));
3688
3689 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3690 if (!hcon)
3691 return;
3692
3693 amp_read_loc_assoc_final_data(hdev, hcon);
3694 }
3695
/* Main HCI event demultiplexer.
 *
 * Called with a freshly received event skb; strips the event header,
 * dispatches to the per-event handler based on the event code, then
 * consumes the skb and bumps the receive statistics. Unknown events
 * are only logged. Each handler sees skb->data pointing at the event
 * parameters (header already pulled).
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Responses to commands we sent earlier */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	/* Flow control: completed ACL/SCO packet accounting */
	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	/* Secure Simple Pairing events */
	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	/* LE events arrive wrapped in a meta event */
	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP physical/logical link events */
	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are harmless; just log them */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* This function owns the skb: free it unconditionally */
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}