Bluetooth: Move and rename hci_conn_accept
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On failure the error is reported to the management interface; on
 * success the local HCI_INQUIRY flag is cleared, any waiters on that
 * bit are woken, and discovery state is moved to STOPPED.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	/* Clear the flag before waking bit-waiters; the barrier keeps the
	 * clear visible to any task about to sleep on HCI_INQUIRY. */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* An inquiry may have blocked pending connection attempts. */
	hci_conn_check_pending(hdev);
}
60
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85 }
86
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * Nothing to update here — presumably the outcome is delivered via the
 * Remote Name Request Complete event instead (verify against the event
 * handler); only trace the callback. */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92
/* Command Complete handler for HCI_Role_Discovery.
 *
 * Updates the connection's cached link_mode: a non-zero role in the
 * reply clears HCI_LM_MASTER, a zero role sets it.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* Look up the connection by handle; it may already be gone. */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157 }
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161 {
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166 if (rp->status)
167 return;
168
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
171
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
174 {
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
177
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
183
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 /* Reset all non-persistent flags */
197 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
198
199 hdev->discovery.state = DISCOVERY_STOPPED;
200 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
201 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
202
203 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
204 hdev->adv_data_len = 0;
205 }
206
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
208 {
209 __u8 status = *((__u8 *) skb->data);
210 void *sent;
211
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
213
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
215 if (!sent)
216 return;
217
218 hci_dev_lock(hdev);
219
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
222 else if (!status)
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
224
225 hci_dev_unlock(hdev);
226 }
227
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
259 }
260
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
263 }
264
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266 {
267 __u8 status = *((__u8 *) skb->data);
268 void *sent;
269
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
271
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 if (!sent)
274 return;
275
276 if (!status) {
277 __u8 param = *((__u8 *) sent);
278
279 if (param)
280 set_bit(HCI_ENCRYPT, &hdev->flags);
281 else
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
283 }
284 }
285
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * Syncs HCI_ISCAN/HCI_PSCAN with the scan mode we requested and emits
 * mgmt discoverable/connectable events only on actual transitions.
 * When inquiry scan was enabled with a discoverable timeout configured,
 * the delayed discov_off work is (re)armed.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* The mode we asked the controller to enter. */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Capture previous state so mgmt is only told about changes. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Arm the auto-disable of discoverable mode if a timeout
		 * (in seconds) was configured. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
333
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339
340 if (rp->status)
341 return;
342
343 memcpy(hdev->dev_class, rp->dev_class, 3);
344
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 __u8 status = *((__u8 *) skb->data);
352 void *sent;
353
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 if (!sent)
358 return;
359
360 hci_dev_lock(hdev);
361
362 if (status == 0)
363 memcpy(hdev->dev_class, sent, 3);
364
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
367
368 hci_dev_unlock(hdev);
369 }
370
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 __u16 setting;
375
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377
378 if (rp->status)
379 return;
380
381 setting = __le16_to_cpu(rp->voice_setting);
382
383 if (hdev->voice_setting == setting)
384 return;
385
386 hdev->voice_setting = setting;
387
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389
390 if (hdev->notify)
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 struct sk_buff *skb)
396 {
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
400
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
402
403 if (status)
404 return;
405
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
409
410 setting = get_unaligned_le16(sent);
411
412 if (hdev->voice_setting == setting)
413 return;
414
415 hdev->voice_setting = setting;
416
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * Syncs LMP_HOST_SSP in host_features on success, then either notifies
 * mgmt (which is told the result regardless of status) or updates the
 * HCI_SSP_ENABLED flag directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->host_features[0] |= LMP_HOST_SSP;
		else
			hdev->host_features[0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
450
451 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
452 {
453 struct hci_rp_read_local_version *rp = (void *) skb->data;
454
455 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
456
457 if (rp->status)
458 return;
459
460 hdev->hci_ver = rp->hci_ver;
461 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
462 hdev->lmp_ver = rp->lmp_ver;
463 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
464 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
465
466 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
467 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
468 }
469
470 static void hci_cc_read_local_commands(struct hci_dev *hdev,
471 struct sk_buff *skb)
472 {
473 struct hci_rp_read_local_commands *rp = (void *) skb->data;
474
475 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
476
477 if (!rp->status)
478 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
479 }
480
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches the LMP feature mask and derives the default ACL packet types
 * and the supported (e)SCO packet types from the advertised features.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}
536
537 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
538 struct sk_buff *skb)
539 {
540 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
541
542 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
543
544 if (rp->status)
545 return;
546
547 switch (rp->page) {
548 case 0:
549 memcpy(hdev->features, rp->features, 8);
550 break;
551 case 1:
552 memcpy(hdev->host_features, rp->features, 8);
553 break;
554 }
555 }
556
557 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
558 struct sk_buff *skb)
559 {
560 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
561
562 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
563
564 if (!rp->status)
565 hdev->flow_ctl_mode = rp->mode;
566 }
567
568 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
569 {
570 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
571
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573
574 if (rp->status)
575 return;
576
577 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
578 hdev->sco_mtu = rp->sco_mtu;
579 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
580 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
581
582 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
583 hdev->sco_mtu = 64;
584 hdev->sco_pkts = 8;
585 }
586
587 hdev->acl_cnt = hdev->acl_pkts;
588 hdev->sco_cnt = hdev->sco_pkts;
589
590 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
591 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
592 }
593
594 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
595 {
596 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
597
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599
600 if (!rp->status)
601 bacpy(&hdev->bdaddr, &rp->bdaddr);
602 }
603
604 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
605 struct sk_buff *skb)
606 {
607 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
608
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610
611 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
612 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
613 hdev->page_scan_window = __le16_to_cpu(rp->window);
614 }
615 }
616
617 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
618 struct sk_buff *skb)
619 {
620 u8 status = *((u8 *) skb->data);
621 struct hci_cp_write_page_scan_activity *sent;
622
623 BT_DBG("%s status 0x%2.2x", hdev->name, status);
624
625 if (status)
626 return;
627
628 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
629 if (!sent)
630 return;
631
632 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
633 hdev->page_scan_window = __le16_to_cpu(sent->window);
634 }
635
636 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
637 struct sk_buff *skb)
638 {
639 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
640
641 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
642
643 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
644 hdev->page_scan_type = rp->type;
645 }
646
647 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
648 struct sk_buff *skb)
649 {
650 u8 status = *((u8 *) skb->data);
651 u8 *type;
652
653 BT_DBG("%s status 0x%2.2x", hdev->name, status);
654
655 if (status)
656 return;
657
658 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
659 if (type)
660 hdev->page_scan_type = *type;
661 }
662
663 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
664 struct sk_buff *skb)
665 {
666 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
667
668 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
669
670 if (rp->status)
671 return;
672
673 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
674 hdev->block_len = __le16_to_cpu(rp->block_len);
675 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
676
677 hdev->block_cnt = hdev->num_blocks;
678
679 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
680 hdev->block_cnt, hdev->block_len);
681 }
682
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 *
 * Caches the AMP controller capabilities on success. The A2MP get-info
 * response is sent on BOTH the success and failure paths (the failure
 * case jumps straight to the label).
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
707
/* Command Complete handler for HCI_Read_Local_AMP_ASSOC.
 *
 * Reassembles the local AMP association, which the controller may return
 * in multiple fragments: while rem_len exceeds the bytes carried in this
 * reply, the fragment is appended and the next chunk is requested; once
 * the final fragment arrives, the assembled association is handed to
 * A2MP. NOTE(review): frag_len is computed as skb->len - sizeof(*rp)
 * with no check that skb->len >= sizeof(*rp), and the memcpy into
 * assoc->data is not bounded here — presumably the caller/controller
 * guarantees sizes; verify against the amp_assoc buffer definition.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Last fragment: finalize the assembled association. */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
744
745 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
746 struct sk_buff *skb)
747 {
748 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
749
750 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
751
752 if (!rp->status)
753 hdev->inq_tx_power = rp->tx_power;
754 }
755
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * mgmt is always told the result; on success the PIN length from the
 * original command is cached on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* Retrieve the PIN length from the command we originally sent. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
783
784 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
785 {
786 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
787
788 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
789
790 hci_dev_lock(hdev);
791
792 if (test_bit(HCI_MGMT, &hdev->dev_flags))
793 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
794 rp->status);
795
796 hci_dev_unlock(hdev);
797 }
798
799 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
800 struct sk_buff *skb)
801 {
802 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
803
804 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
805
806 if (rp->status)
807 return;
808
809 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
810 hdev->le_pkts = rp->le_max_pkt;
811
812 hdev->le_cnt = hdev->le_pkts;
813
814 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
815 }
816
817 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
818 struct sk_buff *skb)
819 {
820 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
821
822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824 if (!rp->status)
825 memcpy(hdev->le_features, rp->features, 8);
826 }
827
828 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
829 struct sk_buff *skb)
830 {
831 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
832
833 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834
835 if (!rp->status)
836 hdev->adv_tx_power = rp->tx_power;
837 }
838
839 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
840 {
841 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
842
843 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
844
845 hci_dev_lock(hdev);
846
847 if (test_bit(HCI_MGMT, &hdev->dev_flags))
848 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
849 rp->status);
850
851 hci_dev_unlock(hdev);
852 }
853
854 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
855 struct sk_buff *skb)
856 {
857 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
858
859 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
860
861 hci_dev_lock(hdev);
862
863 if (test_bit(HCI_MGMT, &hdev->dev_flags))
864 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
865 ACL_LINK, 0, rp->status);
866
867 hci_dev_unlock(hdev);
868 }
869
870 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
871 {
872 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
873
874 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
875
876 hci_dev_lock(hdev);
877
878 if (test_bit(HCI_MGMT, &hdev->dev_flags))
879 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
880 0, rp->status);
881
882 hci_dev_unlock(hdev);
883 }
884
885 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
886 struct sk_buff *skb)
887 {
888 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
889
890 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
891
892 hci_dev_lock(hdev);
893
894 if (test_bit(HCI_MGMT, &hdev->dev_flags))
895 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
896 ACL_LINK, 0, rp->status);
897
898 hci_dev_unlock(hdev);
899 }
900
901 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
902 struct sk_buff *skb)
903 {
904 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
905
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907
908 hci_dev_lock(hdev);
909 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
910 rp->randomizer, rp->status);
911 hci_dev_unlock(hdev);
912 }
913
/* Command Complete handler for HCI_LE_Set_Advertise_Enable.
 *
 * Syncs the HCI_LE_PERIPHERAL flag with the requested enable value on
 * success, and — outside of the init phase — refreshes the advertising
 * data via a fresh HCI request.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	/* Rebuild the advertising data unless we are still initializing. */
	if (!test_bit(HCI_INIT, &hdev->flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_update_ad(&req);
		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);
}
943
944 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
945 {
946 __u8 status = *((__u8 *) skb->data);
947
948 BT_DBG("%s status 0x%2.2x", hdev->name, status);
949
950 if (status) {
951 hci_dev_lock(hdev);
952 mgmt_start_discovery_failed(hdev, status);
953 hci_dev_unlock(hdev);
954 return;
955 }
956 }
957
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * Drives the discovery state machine: on scan enable, set HCI_LE_SCAN
 * and enter FINDING; on scan disable, clear it and either hand off to
 * the interleaved (BR/EDR) phase or stop discovery. Failures are
 * reported to mgmt as start/stop discovery failures.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* For interleaved discovery the LE phase ending while still
		 * FINDING means the BR/EDR inquiry phase starts next. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1012
1013 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1014 struct sk_buff *skb)
1015 {
1016 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1017
1018 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1019
1020 if (!rp->status)
1021 hdev->le_white_list_size = rp->size;
1022 }
1023
1024 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1025 struct sk_buff *skb)
1026 {
1027 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030
1031 if (!rp->status)
1032 memcpy(hdev->le_states, rp->le_states, 8);
1033 }
1034
/* Command Complete handler for HCI_Write_LE_Host_Supported.
 *
 * On success, syncs the LE and simultaneous-LE/BR-EDR host feature bits
 * with what was requested. mgmt is notified (regardless of status), but
 * only when enabled and not during the init phase.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->host_features[0] |= LMP_HOST_LE_BREDR;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);
}
1063
1064 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1065 struct sk_buff *skb)
1066 {
1067 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1068
1069 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1070 hdev->name, rp->status, rp->phy_handle);
1071
1072 if (rp->status)
1073 return;
1074
1075 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1076 }
1077
/* Command Status handler for HCI_Inquiry.
 *
 * On failure, retry any pending connections (the inquiry no longer
 * blocks them) and tell mgmt the discovery start failed. On success,
 * flag the inquiry as running and enter the FINDING discovery state.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
1097
/* Command Status handler for HCI_Create_Connection.
 *
 * On failure: a 0x0c (Command Disallowed) status with fewer than three
 * attempts parks the connection in BT_CONNECT2 for a later retry; any
 * other failure tears the connection down. On success: if no hci_conn
 * exists yet for the peer (connection initiated outside the stack's
 * normal path), create one marked outgoing/master.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: retry up to twice. */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1137
/* Command Status handler for HCI_Add_SCO_Connection.
 *
 * Only failures need handling: look up the ACL link the SCO was being
 * added to and tear down its attached SCO connection.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command carried the ACL handle the SCO rides on. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1172
1173 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1174 {
1175 struct hci_cp_auth_requested *cp;
1176 struct hci_conn *conn;
1177
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1179
1180 if (!status)
1181 return;
1182
1183 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1184 if (!cp)
1185 return;
1186
1187 hci_dev_lock(hdev);
1188
1189 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1190 if (conn) {
1191 if (conn->state == BT_CONFIG) {
1192 hci_proto_connect_cfm(conn, status);
1193 hci_conn_drop(conn);
1194 }
1195 }
1196
1197 hci_dev_unlock(hdev);
1198 }
1199
1200 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1201 {
1202 struct hci_cp_set_conn_encrypt *cp;
1203 struct hci_conn *conn;
1204
1205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1206
1207 if (!status)
1208 return;
1209
1210 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1211 if (!cp)
1212 return;
1213
1214 hci_dev_lock(hdev);
1215
1216 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1217 if (conn) {
1218 if (conn->state == BT_CONFIG) {
1219 hci_proto_connect_cfm(conn, status);
1220 hci_conn_drop(conn);
1221 }
1222 }
1223
1224 hci_dev_unlock(hdev);
1225 }
1226
1227 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1228 struct hci_conn *conn)
1229 {
1230 if (conn->state != BT_CONFIG || !conn->out)
1231 return 0;
1232
1233 if (conn->pending_sec_level == BT_SECURITY_SDP)
1234 return 0;
1235
1236 /* Only request authentication for SSP connections or non-SSP
1237 * devices with sec_level HIGH or if MITM protection is requested */
1238 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1239 conn->pending_sec_level != BT_SECURITY_HIGH)
1240 return 0;
1241
1242 return 1;
1243 }
1244
1245 static int hci_resolve_name(struct hci_dev *hdev,
1246 struct inquiry_entry *e)
1247 {
1248 struct hci_cp_remote_name_req cp;
1249
1250 memset(&cp, 0, sizeof(cp));
1251
1252 bacpy(&cp.bdaddr, &e->data.bdaddr);
1253 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1254 cp.pscan_mode = e->data.pscan_mode;
1255 cp.clock_offset = e->data.clock_offset;
1256
1257 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1258 }
1259
1260 static bool hci_resolve_next_name(struct hci_dev *hdev)
1261 {
1262 struct discovery_state *discov = &hdev->discovery;
1263 struct inquiry_entry *e;
1264
1265 if (list_empty(&discov->resolve))
1266 return false;
1267
1268 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1269 if (!e)
1270 return false;
1271
1272 if (hci_resolve_name(hdev, e) == 0) {
1273 e->name_state = NAME_PENDING;
1274 return true;
1275 }
1276
1277 return false;
1278 }
1279
/* Deliver a resolved (or failed) remote name and drive the
 * name-resolution phase of discovery forward.
 *
 * If @conn exists and was not yet reported to mgmt, report it as
 * connected first, carrying the name along.  Then update the discovery
 * state machine: remove the matching NAME_PENDING inquiry entry,
 * report the name when one was received (name != NULL), and either
 * start resolving the next pending name or mark discovery stopped.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving a next name
	 * here: that will be done upon receiving another Remote Name
	 * Request Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* name == NULL means the request failed; remember that the
		 * name could not be obtained.
		 */
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1322
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * On success nothing is done here; the Remote Name Request Complete
 * event handles any follow-up.  On failure the pending-name discovery
 * bookkeeping is flushed (when mgmt is active) and, for outgoing
 * connections that still need it, authentication is started, since
 * the name request will never complete.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		/* NOTE: this local intentionally shadows the outer *cp */
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1361
1362 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1363 {
1364 struct hci_cp_read_remote_features *cp;
1365 struct hci_conn *conn;
1366
1367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1368
1369 if (!status)
1370 return;
1371
1372 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1373 if (!cp)
1374 return;
1375
1376 hci_dev_lock(hdev);
1377
1378 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1379 if (conn) {
1380 if (conn->state == BT_CONFIG) {
1381 hci_proto_connect_cfm(conn, status);
1382 hci_conn_drop(conn);
1383 }
1384 }
1385
1386 hci_dev_unlock(hdev);
1387 }
1388
1389 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1390 {
1391 struct hci_cp_read_remote_ext_features *cp;
1392 struct hci_conn *conn;
1393
1394 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1395
1396 if (!status)
1397 return;
1398
1399 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1400 if (!cp)
1401 return;
1402
1403 hci_dev_lock(hdev);
1404
1405 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1406 if (conn) {
1407 if (conn->state == BT_CONFIG) {
1408 hci_proto_connect_cfm(conn, status);
1409 hci_conn_drop(conn);
1410 }
1411 }
1412
1413 hci_dev_unlock(hdev);
1414 }
1415
1416 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1417 {
1418 struct hci_cp_setup_sync_conn *cp;
1419 struct hci_conn *acl, *sco;
1420 __u16 handle;
1421
1422 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1423
1424 if (!status)
1425 return;
1426
1427 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1428 if (!cp)
1429 return;
1430
1431 handle = __le16_to_cpu(cp->handle);
1432
1433 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1434
1435 hci_dev_lock(hdev);
1436
1437 acl = hci_conn_hash_lookup_handle(hdev, handle);
1438 if (acl) {
1439 sco = acl->link;
1440 if (sco) {
1441 sco->state = BT_CLOSED;
1442
1443 hci_proto_connect_cfm(sco, status);
1444 hci_conn_del(sco);
1445 }
1446 }
1447
1448 hci_dev_unlock(hdev);
1449 }
1450
1451 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1452 {
1453 struct hci_cp_sniff_mode *cp;
1454 struct hci_conn *conn;
1455
1456 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1457
1458 if (!status)
1459 return;
1460
1461 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1462 if (!cp)
1463 return;
1464
1465 hci_dev_lock(hdev);
1466
1467 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1468 if (conn) {
1469 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1470
1471 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1472 hci_sco_setup(conn, status);
1473 }
1474
1475 hci_dev_unlock(hdev);
1476 }
1477
1478 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1479 {
1480 struct hci_cp_exit_sniff_mode *cp;
1481 struct hci_conn *conn;
1482
1483 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1484
1485 if (!status)
1486 return;
1487
1488 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1489 if (!cp)
1490 return;
1491
1492 hci_dev_lock(hdev);
1493
1494 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1495 if (conn) {
1496 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1497
1498 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1499 hci_sco_setup(conn, status);
1500 }
1501
1502 hci_dev_unlock(hdev);
1503 }
1504
1505 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1506 {
1507 struct hci_cp_disconnect *cp;
1508 struct hci_conn *conn;
1509
1510 if (!status)
1511 return;
1512
1513 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1514 if (!cp)
1515 return;
1516
1517 hci_dev_lock(hdev);
1518
1519 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1520 if (conn)
1521 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1522 conn->dst_type, status);
1523
1524 hci_dev_unlock(hdev);
1525 }
1526
/* Command status handler for HCI_OP_LE_CREATE_CONN.
 *
 * Only failures are handled.  LE Create Connection carries no handle
 * yet, so the pending connection is found by looking up the LE link
 * that is still in BT_CONNECT state; it is then marked closed,
 * reported to mgmt and the protocol layer, and deleted.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (!conn) {
			hci_dev_unlock(hdev);
			return;
		}

		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);

		conn->state = BT_CLOSED;
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		hci_proto_connect_cfm(conn, status);
		hci_conn_del(conn);

		hci_dev_unlock(hdev);
	}
}
1553
1554 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1555 {
1556 struct hci_cp_create_phy_link *cp;
1557
1558 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1559
1560 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1561 if (!cp)
1562 return;
1563
1564 hci_dev_lock(hdev);
1565
1566 if (status) {
1567 struct hci_conn *hcon;
1568
1569 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1570 if (hcon)
1571 hci_conn_del(hcon);
1572 } else {
1573 amp_write_remote_assoc(hdev, cp->phy_handle);
1574 }
1575
1576 hci_dev_unlock(hdev);
1577 }
1578
1579 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1580 {
1581 struct hci_cp_accept_phy_link *cp;
1582
1583 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1584
1585 if (status)
1586 return;
1587
1588 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1589 if (!cp)
1590 return;
1591
1592 amp_write_remote_assoc(hdev, cp->phy_handle);
1593 }
1594
/* Inquiry Complete event: the device-discovery scan finished.
 *
 * Wakes up any waiter on HCI_INQUIRY and, when mgmt-driven discovery
 * is active, either starts resolving the names of discovered devices
 * or marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* A connection attempt may have been queued behind the inquiry;
	 * give it a chance to run now.
	 */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first name that is still needed */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1635
/* Inquiry Result event: one or more discovered devices, without RSSI
 * data (this event variant carries no RSSI, so 0 is recorded).
 *
 * Each result is added to the inquiry cache and forwarded to mgmt.
 * Results are ignored while a periodic inquiry is active.
 *
 * NOTE(review): num_rsp and the info entries are read from the skb
 * without validating skb->len against num_rsp — assumes a well-formed
 * event from the controller; verify if the transport is untrusted.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, !name_known, ssp, NULL,
				  0);
	}

	hci_dev_unlock(hdev);
}
1672
/* Connection Complete event: the controller finished an incoming or
 * outgoing BR/EDR connection attempt (ACL, SCO or eSCO).
 *
 * Looks up the connection object created when the attempt started,
 * stores the new handle and, on success, continues setup (remote
 * feature read for ACL, packet type fix-up for pre-2.0 incoming
 * links).  On failure the connection object is torn down and mgmt is
 * informed for ACL links.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO attempt may have completed as plain SCO; retry
		 * the lookup under ESCO_LINK and downgrade the link type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming non-SSP links without a stored link key
			 * get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* Successful ACL links are confirmed later, after the
		 * remote feature exchange.
		 */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	/* A deferred connection attempt may be able to run now */
	hci_conn_check_pending(hdev);
}
1754
/* Connection Request event: a remote device wants to connect to us.
 *
 * The request is accepted when the link policy (possibly adjusted by
 * the protocol layer through hci_proto_connect_ind()) allows it and
 * the peer is not blacklisted; otherwise it is rejected.  For an
 * accepted request a connection object is created (or reused) and the
 * matching accept command is sent, unless the protocol layer asked to
 * defer the decision (HCI_PROTO_DEFER).
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached class of device for this peer */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			/* Synchronous accept with default voice parameters */
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Protocol layer will decide later; report the
			 * pending request upwards.
			 */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1836
1837 static u8 hci_to_mgmt_reason(u8 err)
1838 {
1839 switch (err) {
1840 case HCI_ERROR_CONNECTION_TIMEOUT:
1841 return MGMT_DEV_DISCONN_TIMEOUT;
1842 case HCI_ERROR_REMOTE_USER_TERM:
1843 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1844 case HCI_ERROR_REMOTE_POWER_OFF:
1845 return MGMT_DEV_DISCONN_REMOTE;
1846 case HCI_ERROR_LOCAL_HOST_TERM:
1847 return MGMT_DEV_DISCONN_LOCAL_HOST;
1848 default:
1849 return MGMT_DEV_DISCONN_UNKNOWN;
1850 }
1851 }
1852
/* Disconnection Complete event: an existing connection went down, or a
 * requested disconnect failed (non-zero status).
 *
 * Notifies mgmt — distinguishing a failed disconnect request from an
 * actual disconnection with a translated reason — then, on success,
 * removes the stored link key when requested and tears down the
 * connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only connections previously reported to mgmt need a
	 * disconnect/failure notification.
	 */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1892
/* Authentication Complete event.
 *
 * On success updates the link mode and security level (re-auth of a
 * legacy, non-SSP device is not possible and is only logged); on
 * failure informs mgmt.  Then continues connection setup: for SSP
 * links still in BT_CONFIG encryption is requested next, otherwise
 * the connection is confirmed.  A pending encryption request
 * (HCI_CONN_ENCRYPT_PEND) is also started or failed here.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection around for the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			/* Encryption cannot proceed without authentication */
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1958
/* Remote Name Request Complete event.
 *
 * When mgmt is active, feeds the received name (or the failure, as a
 * NULL name) into the discovery name-resolution machinery.  Then, for
 * outgoing connections that were waiting on the name request,
 * authentication is started.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* A deferred connection attempt may be able to run now */
	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1997
/* Encryption Change event: encryption was enabled or disabled on an
 * existing connection.
 *
 * Updates the link mode (enabling encryption implies authentication),
 * disconnects established links on failure, and completes connection
 * setup for links still in BT_CONFIG.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* Failing to change encryption on an established link is
		 * treated as an authentication failure: disconnect.
		 */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2040
2041 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2042 struct sk_buff *skb)
2043 {
2044 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2045 struct hci_conn *conn;
2046
2047 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2048
2049 hci_dev_lock(hdev);
2050
2051 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2052 if (conn) {
2053 if (!ev->status)
2054 conn->link_mode |= HCI_LM_SECURE;
2055
2056 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2057
2058 hci_key_change_cfm(conn, ev->status);
2059 }
2060
2061 hci_dev_unlock(hdev);
2062 }
2063
/* Read Remote Supported Features Complete event.
 *
 * Stores the remote feature mask and continues connection setup for
 * links in BT_CONFIG: reads extended features when both sides support
 * SSP, otherwise requests the remote name (or reports the device to
 * mgmt when already named) and, if no authentication is needed,
 * confirms the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP-capable: fetch the extended feature page next */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2113
/* Command Complete event: dispatch the completion of a single HCI
 * command to its hci_cc_* handler based on the opcode.
 *
 * The event header is pulled off the skb so each handler only sees the
 * command's return parameters.  The status byte read here is the first
 * byte of those return parameters.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real completion means the command did not time out */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Controller can take more commands: restart the command work
	 * queue if anything is pending (unless a reset is in progress).
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2349
/* Command Status event: dispatch the status of a single HCI command to
 * its hci_cs_* handler based on the opcode.
 *
 * Unlike Command Complete, a successful Command Status usually means a
 * later event will finish the operation, so the request is only
 * completed here on failure or when no follow-up event is expected.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real status means the command did not time out */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the request now only on failure, or when the sent
	 * command expects no further event to finish it.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Controller can take more commands: restart the command work
	 * queue if anything is pending (unless a reset is in progress).
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2438
2439 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2440 {
2441 struct hci_ev_role_change *ev = (void *) skb->data;
2442 struct hci_conn *conn;
2443
2444 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2445
2446 hci_dev_lock(hdev);
2447
2448 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2449 if (conn) {
2450 if (!ev->status) {
2451 if (ev->role)
2452 conn->link_mode &= ~HCI_LM_MASTER;
2453 else
2454 conn->link_mode |= HCI_LM_MASTER;
2455 }
2456
2457 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2458
2459 hci_role_switch_cfm(conn, ev->status, ev->role);
2460 }
2461
2462 hci_dev_unlock(hdev);
2463 }
2464
/* Handle the Number Of Completed Packets event.
 *
 * Returns transmit credits from each listed connection to the
 * device-wide packet counters and kicks the TX work queue so pending
 * traffic can flow again.  Only valid in packet-based flow control
 * mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* All num_hndl handle/count pairs must fit inside the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the counter matching the link type, clamped to
		 * the controller-reported buffer capacity. */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_pkts == 0) share the ACL counters. */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2530
2531 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2532 __u16 handle)
2533 {
2534 struct hci_chan *chan;
2535
2536 switch (hdev->dev_type) {
2537 case HCI_BREDR:
2538 return hci_conn_hash_lookup_handle(hdev, handle);
2539 case HCI_AMP:
2540 chan = hci_chan_lookup_handle(hdev, handle);
2541 if (chan)
2542 return chan->conn;
2543 break;
2544 default:
2545 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2546 break;
2547 }
2548
2549 return NULL;
2550 }
2551
/* Handle the Number Of Completed Data Blocks event.
 *
 * Block-based counterpart of hci_num_comp_pkts_evt(): returns data
 * block credits to the shared block counter and re-schedules TX work.
 * Only valid in block-based flow control mode.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* All num_hndl entries must fit inside the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may refer to an AMP logical link, hence the
		 * device-type aware lookup. */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp to the controller-reported block count */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2601
/* Handle the Mode Change event (active/sniff/hold transitions).
 *
 * Records the new mode and interval on the connection, maintains the
 * POWER_SAVE flag and completes any SCO setup that was waiting for the
 * mode change.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		/* Only adjust POWER_SAVE when the mode change was not
		 * initiated locally (no MODE_CHANGE_PEND set). */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* A queued SCO setup was waiting for this mode change */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
2630
/* Handle the PIN Code Request event.
 *
 * Rejects the request outright when the device is not pairable;
 * otherwise, with mgmt enabled, forwards the PIN request to user
 * space, flagging whether the pending security level demands a secure
 * PIN.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Switch to the longer pairing timeout while the PIN
		 * exchange is in progress.  NOTE(review): the hold/drop
		 * pair appears intended to re-arm the disconnect timer
		 * with the new timeout -- confirm against hci_conn_drop.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security pending -> tell user space a secure
		 * PIN is required. */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

 unlock:
	hci_dev_unlock(hdev);
}
2667
/* Handle the Link Key Request event.
 *
 * Looks up a stored link key for the peer and answers with a Link Key
 * Reply, unless the stored key's type is unacceptable for the
 * connection's pending security level, in which case a negative reply
 * is sent so that fresh pairing can take place.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only active once user space has loaded keys */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable when explicitly enabled */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key cannot satisfy a request for
		 * MITM protection (auth_type bit 0). */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Short-PIN combination keys are too weak for high
		 * security. */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2730
/* Handle the Link Key Notification event.
 *
 * Records the new key type on the connection (if one exists) and
 * stores the key when link key storage is enabled.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	/* NOTE(review): conn may be NULL here; hci_add_link_key is
	 * presumably prepared for that -- verify its contract. */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2759
2760 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2761 {
2762 struct hci_ev_clock_offset *ev = (void *) skb->data;
2763 struct hci_conn *conn;
2764
2765 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2766
2767 hci_dev_lock(hdev);
2768
2769 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2770 if (conn && !ev->status) {
2771 struct inquiry_entry *ie;
2772
2773 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2774 if (ie) {
2775 ie->data.clock_offset = ev->clock_offset;
2776 ie->timestamp = jiffies;
2777 }
2778 }
2779
2780 hci_dev_unlock(hdev);
2781 }
2782
2783 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2784 {
2785 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2786 struct hci_conn *conn;
2787
2788 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2789
2790 hci_dev_lock(hdev);
2791
2792 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2793 if (conn && !ev->status)
2794 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2795
2796 hci_dev_unlock(hdev);
2797 }
2798
2799 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2800 {
2801 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2802 struct inquiry_entry *ie;
2803
2804 BT_DBG("%s", hdev->name);
2805
2806 hci_dev_lock(hdev);
2807
2808 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2809 if (ie) {
2810 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2811 ie->timestamp = jiffies;
2812 }
2813
2814 hci_dev_unlock(hdev);
2815 }
2816
/* Handle the Inquiry Result With RSSI event.
 *
 * Controllers use one of two wire layouts for this event: with or
 * without the pscan_mode byte.  The layout is detected from the
 * per-response size; each response then updates the inquiry cache and
 * is reported to the management interface.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Per-response size mismatch means the variant that carries a
	 * pscan_mode field was used. */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2876
/* Handle the Read Remote Extended Features Complete event.
 *
 * Page 1 of the remote host features tells us whether the peer's host
 * supports Secure Simple Pairing; that state is mirrored into the
 * inquiry cache and the connection flags.  During connection setup
 * (BT_CONFIG) this event also drives the next step: remote name
 * request, mgmt connected notification, or connection confirmation.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}
	}

	/* The remaining steps only apply during connection setup */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* Name still unknown to mgmt: resolve it first */
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* Connection setup is complete unless we still have to
	 * authenticate as initiator. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2936
/* Handle the Synchronous Connection Complete event (SCO/eSCO).
 *
 * Matches the event to a pending SCO or eSCO connection (falling back
 * from SCO to a pending eSCO entry when needed), finalizes it on
 * success and retries once with reduced packet types on selected
 * rejection errors.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO attempt may have been downgraded to SCO by
		 * the controller: look up the pending eSCO entry and
		 * re-type it. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry an outgoing attempt once with the fallback
		 * packet-type mask before giving up. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2991
/* Handle the Extended Inquiry Result event.
 *
 * Each response carries an EIR data blob alongside the usual inquiry
 * fields.  The inquiry cache is updated and every device is reported
 * to the management interface together with its EIR payload.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* With mgmt active, a complete name inside the EIR data
		 * means no separate name resolution is needed. */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
3039
/* Handle the Encryption Key Refresh Complete event.
 *
 * On success the pending security level becomes effective.  A failure
 * on an established connection triggers a disconnect; during setup the
 * connection is confirmed with the reported status instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Failed refresh on a live link: drop the connection */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3083
/* Compute the authentication requirements value to send in an IO
 * Capability reply, based on what the remote side requested.
 *
 * NOTE(review): the numeric values follow the HCI authentication
 * requirements encoding (0x00/0x01 no bonding without/with MITM,
 * 0x02/0x03 dedicated bonding without/with MITM) -- confirm against
 * the Bluetooth Core specification.
 */
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests dedicated bonding follow that lead */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
		/* If both remote and local IO capabilities allow MITM
		 * protection then require it, otherwise don't */
		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
			return 0x02;
		else
			return 0x03;
	}

	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
		return conn->remote_auth | (conn->auth_type & 0x01);

	return conn->auth_type;
}
3102
/* Handle the IO Capability Request event.
 *
 * Replies with our IO capability when pairing is acceptable (device
 * pairable, or the remote only wants no-bonding); otherwise sends a
 * negative reply rejecting the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* NOTE(review): reference taken here is not dropped in this
	 * function; presumably released when pairing completes (see
	 * hci_simple_pair_complete_evt) -- verify. */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
						0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only if we have it and it can
		 * actually be used for this pairing. */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3154
3155 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3156 {
3157 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3158 struct hci_conn *conn;
3159
3160 BT_DBG("%s", hdev->name);
3161
3162 hci_dev_lock(hdev);
3163
3164 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3165 if (!conn)
3166 goto unlock;
3167
3168 conn->remote_cap = ev->capability;
3169 conn->remote_auth = ev->authentication;
3170 if (ev->oob_data)
3171 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3172
3173 unlock:
3174 hci_dev_unlock(hdev);
3175 }
3176
/* Handle the User Confirmation Request event.
 *
 * Applies the MITM policy of both sides: rejects when required MITM
 * protection is impossible, auto-accepts (possibly after a configured
 * delay) when no side needs MITM, and otherwise forwards the request
 * to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirements encodes the MITM demand */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the accept to give the user a chance
		 * to see the passkey. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3244
3245 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3246 struct sk_buff *skb)
3247 {
3248 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3249
3250 BT_DBG("%s", hdev->name);
3251
3252 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3253 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3254 }
3255
3256 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3257 struct sk_buff *skb)
3258 {
3259 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3260 struct hci_conn *conn;
3261
3262 BT_DBG("%s", hdev->name);
3263
3264 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3265 if (!conn)
3266 return;
3267
3268 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3269 conn->passkey_entered = 0;
3270
3271 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3272 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3273 conn->dst_type, conn->passkey_notify,
3274 conn->passkey_entered);
3275 }
3276
/* Handle the Keypress Notification event.
 *
 * Tracks the number of passkey digits the remote user has entered so
 * far and reports intermediate progress to user space; start and
 * completion events return without a notification.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	/* Unlisted type values fall through to the notification below */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
3314
/* Handle the Simple Pairing Complete event.
 *
 * Reports a pairing failure to user space when we did not initiate the
 * authentication ourselves, and releases the connection reference.
 * NOTE(review): the hci_conn_drop here presumably balances the
 * hci_conn_hold taken in hci_io_capa_request_evt -- verify.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3343
3344 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3345 struct sk_buff *skb)
3346 {
3347 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3348 struct inquiry_entry *ie;
3349
3350 BT_DBG("%s", hdev->name);
3351
3352 hci_dev_lock(hdev);
3353
3354 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3355 if (ie)
3356 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3357
3358 hci_dev_unlock(hdev);
3359 }
3360
/* Handle the Remote OOB Data Request event.
 *
 * Answers with the stored out-of-band hash/randomizer for the peer if
 * we have one, otherwise sends a negative reply.  Only acted upon when
 * mgmt is active.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
			     &cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
			     &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3395
3396 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3397 struct sk_buff *skb)
3398 {
3399 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3400 struct hci_conn *hcon, *bredr_hcon;
3401
3402 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3403 ev->status);
3404
3405 hci_dev_lock(hdev);
3406
3407 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3408 if (!hcon) {
3409 hci_dev_unlock(hdev);
3410 return;
3411 }
3412
3413 if (ev->status) {
3414 hci_conn_del(hcon);
3415 hci_dev_unlock(hdev);
3416 return;
3417 }
3418
3419 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3420
3421 hcon->state = BT_CONNECTED;
3422 bacpy(&hcon->dst, &bredr_hcon->dst);
3423
3424 hci_conn_hold(hcon);
3425 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3426 hci_conn_drop(hcon);
3427
3428 hci_conn_add_sysfs(hcon);
3429
3430 amp_physical_cfm(bredr_hcon, hcon);
3431
3432 hci_dev_unlock(hdev);
3433 }
3434
/* Handle the AMP Logical Link Complete event.
 *
 * Creates the hci_chan representing the new logical link and, if an
 * L2CAP channel is waiting on the AMP manager, confirms the logical
 * link to L2CAP.
 *
 * NOTE(review): unlike the sibling handlers this one does not take
 * hci_dev_lock -- confirm whether the lookups here are safe without it.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Block-based AMP controllers report their MTU in
		 * block_mtu. */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3472
3473 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3474 struct sk_buff *skb)
3475 {
3476 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3477 struct hci_chan *hchan;
3478
3479 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3480 le16_to_cpu(ev->handle), ev->status);
3481
3482 if (ev->status)
3483 return;
3484
3485 hci_dev_lock(hdev);
3486
3487 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3488 if (!hchan)
3489 goto unlock;
3490
3491 amp_destroy_logical_link(hchan, ev->reason);
3492
3493 unlock:
3494 hci_dev_unlock(hdev);
3495 }
3496
3497 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3498 struct sk_buff *skb)
3499 {
3500 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3501 struct hci_conn *hcon;
3502
3503 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3504
3505 if (ev->status)
3506 return;
3507
3508 hci_dev_lock(hdev);
3509
3510 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3511 if (hcon) {
3512 hcon->state = BT_CLOSED;
3513 hci_conn_del(hcon);
3514 }
3515
3516 hci_dev_unlock(hdev);
3517 }
3518
/* Handle the LE Connection Complete event.
 *
 * For an outgoing connection a pending hci_conn in BT_CONNECT state is
 * reused; for an incoming one a new hci_conn is created and its role
 * derived from ev->role.  On success the connection is finalized and
 * reported to mgmt; on failure the connect error is propagated and the
 * connection deleted.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* A BT_CONNECT LE entry means we initiated this connection */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3568
3569 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3570 {
3571 u8 num_reports = skb->data[0];
3572 void *ptr = &skb->data[1];
3573 s8 rssi;
3574
3575 while (num_reports--) {
3576 struct hci_ev_le_advertising_info *ev = ptr;
3577
3578 rssi = ev->data[ev->length];
3579 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3580 NULL, rssi, 0, 1, ev->data, ev->length);
3581
3582 ptr += sizeof(*ev) + ev->length + 1;
3583 }
3584 }
3585
3586 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3587 {
3588 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3589 struct hci_cp_le_ltk_reply cp;
3590 struct hci_cp_le_ltk_neg_reply neg;
3591 struct hci_conn *conn;
3592 struct smp_ltk *ltk;
3593
3594 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3595
3596 hci_dev_lock(hdev);
3597
3598 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3599 if (conn == NULL)
3600 goto not_found;
3601
3602 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3603 if (ltk == NULL)
3604 goto not_found;
3605
3606 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3607 cp.handle = cpu_to_le16(conn->handle);
3608
3609 if (ltk->authenticated)
3610 conn->sec_level = BT_SECURITY_HIGH;
3611
3612 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3613
3614 if (ltk->type & HCI_SMP_STK) {
3615 list_del(&ltk->list);
3616 kfree(ltk);
3617 }
3618
3619 hci_dev_unlock(hdev);
3620
3621 return;
3622
3623 not_found:
3624 neg.handle = ev->handle;
3625 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3626 hci_dev_unlock(hdev);
3627 }
3628
3629 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3630 {
3631 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3632
3633 skb_pull(skb, sizeof(*le_ev));
3634
3635 switch (le_ev->subevent) {
3636 case HCI_EV_LE_CONN_COMPLETE:
3637 hci_le_conn_complete_evt(hdev, skb);
3638 break;
3639
3640 case HCI_EV_LE_ADVERTISING_REPORT:
3641 hci_le_adv_report_evt(hdev, skb);
3642 break;
3643
3644 case HCI_EV_LE_LTK_REQ:
3645 hci_le_ltk_request_evt(hdev, skb);
3646 break;
3647
3648 default:
3649 break;
3650 }
3651 }
3652
3653 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3654 {
3655 struct hci_ev_channel_selected *ev = (void *) skb->data;
3656 struct hci_conn *hcon;
3657
3658 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3659
3660 skb_pull(skb, sizeof(*ev));
3661
3662 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3663 if (!hcon)
3664 return;
3665
3666 amp_read_loc_assoc_final_data(hdev, hcon);
3667 }
3668
/* Main HCI event dispatcher: called for every event packet received
 * from the controller. Optionally snapshots the event for a pending
 * synchronous request, completes a sent command waiting on this event,
 * then routes the (header-stripped) packet to the per-event handler.
 * Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		/* Replace any previously stashed event with this one */
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Handlers below see the payload only, not the event header */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for exactly this event,
	 * mark that command complete before dispatching.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown/unhandled events are logged and dropped */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}