Bluetooth: Track feature pages in a single table
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On failure, tell the MGMT layer that stopping discovery failed.
 * On success, clear HCI_INQUIRY, wake any task sleeping on that bit,
 * mark discovery STOPPED and re-check pending connection requests.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);
	/* Ensure the cleared bit is visible before waking waiters. */
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
60
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85 }
86
/* Command Complete handler for remote name request cancel.  No state is
 * kept for the cancel itself, so only the debug trace is emitted.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY: on success,
 * mirror the policy we asked for into the matching connection object.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent points at the raw command parameters: a 2-byte
		 * handle followed by the 2-byte policy, hence +2.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161 {
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166 if (rp->status)
167 return;
168
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
171
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
174 {
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
177
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
183
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
186 }
187
/* Command Complete handler for HCI_OP_RESET: return the local state to
 * its post-reset defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising data does not survive a reset either. */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;
}
206
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When the management interface is active it gets the completion
 * (success or failure) and owns the name bookkeeping; otherwise the
 * name is copied locally only on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
227
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
259 }
260
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
263 }
264
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266 {
267 __u8 status = *((__u8 *) skb->data);
268 void *sent;
269
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
271
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 if (!sent)
274 return;
275
276 if (!status) {
277 __u8 param = *((__u8 *) sent);
278
279 if (param)
280 set_bit(HCI_ENCRYPT, &hdev->flags);
281 else
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
283 }
284 }
285
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Updates the inquiry-scan (discoverable) and page-scan (connectable)
 * flags to match the parameter we sent and notifies the MGMT layer of
 * any transition.  On failure the discoverable timeout is cancelled
 * and MGMT is told the write failed.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Remember the previous state so MGMT is only notified on change. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Arm the timer that turns discoverable mode back off. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
333
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339
340 if (rp->status)
341 return;
342
343 memcpy(hdev->dev_class, rp->dev_class, 3);
344
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 __u8 status = *((__u8 *) skb->data);
352 void *sent;
353
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 if (!sent)
358 return;
359
360 hci_dev_lock(hdev);
361
362 if (status == 0)
363 memcpy(hdev->dev_class, sent, 3);
364
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
367
368 hci_dev_unlock(hdev);
369 }
370
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 __u16 setting;
375
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377
378 if (rp->status)
379 return;
380
381 setting = __le16_to_cpu(rp->voice_setting);
382
383 if (hdev->voice_setting == setting)
384 return;
385
386 hdev->voice_setting = setting;
387
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389
390 if (hdev->notify)
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 struct sk_buff *skb)
396 {
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
400
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
402
403 if (status)
404 return;
405
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
409
410 setting = get_unaligned_le16(sent);
411
412 if (hdev->voice_setting == setting)
413 return;
414
415 hdev->voice_setting = setting;
416
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, mirror the requested mode into the host-features page
 * (page 1) and, without MGMT, into the HCI_SSP_ENABLED flag.  MGMT is
 * notified of the outcome even on failure.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
450
451 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
452 {
453 struct hci_rp_read_local_version *rp = (void *) skb->data;
454
455 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
456
457 if (rp->status)
458 return;
459
460 hdev->hci_ver = rp->hci_ver;
461 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
462 hdev->lmp_ver = rp->lmp_ver;
463 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
464 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
465
466 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
467 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
468 }
469
470 static void hci_cc_read_local_commands(struct hci_dev *hdev,
471 struct sk_buff *skb)
472 {
473 struct hci_rp_read_local_commands *rp = (void *) skb->data;
474
475 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
476
477 if (!rp->status)
478 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
479 }
480
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Stores feature page 0 and derives the default ACL packet types and
 * (e)SCO link types from the individual feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0][0], hdev->features[0][1],
	       hdev->features[0][2], hdev->features[0][3],
	       hdev->features[0][4], hdev->features[0][5],
	       hdev->features[0][6], hdev->features[0][7]);
}
536
537 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
538 struct sk_buff *skb)
539 {
540 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
541
542 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
543
544 if (rp->status)
545 return;
546
547 if (rp->page < HCI_MAX_PAGES)
548 memcpy(hdev->features[rp->page], rp->features, 8);
549 }
550
551 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
552 struct sk_buff *skb)
553 {
554 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
555
556 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
557
558 if (!rp->status)
559 hdev->flow_ctl_mode = rp->mode;
560 }
561
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Records the controller's ACL/SCO MTUs and packet counts and seeds
 * the outstanding-packet counters from them.  Controllers with the
 * FIXUP_BUFFER_SIZE quirk report bogus SCO values, so those are
 * overridden after parsing.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
587
588 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
589 {
590 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
591
592 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
593
594 if (!rp->status)
595 bacpy(&hdev->bdaddr, &rp->bdaddr);
596 }
597
598 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
599 struct sk_buff *skb)
600 {
601 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
602
603 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
604
605 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
606 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
607 hdev->page_scan_window = __le16_to_cpu(rp->window);
608 }
609 }
610
611 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
612 struct sk_buff *skb)
613 {
614 u8 status = *((u8 *) skb->data);
615 struct hci_cp_write_page_scan_activity *sent;
616
617 BT_DBG("%s status 0x%2.2x", hdev->name, status);
618
619 if (status)
620 return;
621
622 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
623 if (!sent)
624 return;
625
626 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
627 hdev->page_scan_window = __le16_to_cpu(sent->window);
628 }
629
630 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
631 struct sk_buff *skb)
632 {
633 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
634
635 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636
637 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
638 hdev->page_scan_type = rp->type;
639 }
640
641 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
642 struct sk_buff *skb)
643 {
644 u8 status = *((u8 *) skb->data);
645 u8 *type;
646
647 BT_DBG("%s status 0x%2.2x", hdev->name, status);
648
649 if (status)
650 return;
651
652 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
653 if (type)
654 hdev->page_scan_type = *type;
655 }
656
657 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
658 struct sk_buff *skb)
659 {
660 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
661
662 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
663
664 if (rp->status)
665 return;
666
667 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
668 hdev->block_len = __le16_to_cpu(rp->block_len);
669 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
670
671 hdev->block_cnt = hdev->num_blocks;
672
673 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
674 hdev->block_cnt, hdev->block_len);
675 }
676
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * Caches the AMP controller capabilities; the A2MP "get info" response
 * is sent in all cases, success or failure.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
701
702 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
703 struct sk_buff *skb)
704 {
705 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
706 struct amp_assoc *assoc = &hdev->loc_assoc;
707 size_t rem_len, frag_len;
708
709 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
710
711 if (rp->status)
712 goto a2mp_rsp;
713
714 frag_len = skb->len - sizeof(*rp);
715 rem_len = __le16_to_cpu(rp->rem_len);
716
717 if (rem_len > frag_len) {
718 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
719
720 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
721 assoc->offset += frag_len;
722
723 /* Read other fragments */
724 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
725
726 return;
727 }
728
729 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
730 assoc->len = assoc->offset + rem_len;
731 assoc->offset = 0;
732
733 a2mp_rsp:
734 /* Send A2MP Rsp when all fragments are received */
735 a2mp_send_getampassoc_rsp(hdev, rp->status);
736 a2mp_send_create_phy_link_req(hdev, rp->status);
737 }
738
739 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
740 struct sk_buff *skb)
741 {
742 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
743
744 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
745
746 if (!rp->status)
747 hdev->inq_tx_power = rp->tx_power;
748 }
749
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * MGMT is always informed of the outcome.  On success, the PIN length
 * from the original command is recorded on the matching connection so
 * later key handling knows it.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
777
778 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
779 {
780 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
781
782 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
783
784 hci_dev_lock(hdev);
785
786 if (test_bit(HCI_MGMT, &hdev->dev_flags))
787 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
788 rp->status);
789
790 hci_dev_unlock(hdev);
791 }
792
793 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
794 struct sk_buff *skb)
795 {
796 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
797
798 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
799
800 if (rp->status)
801 return;
802
803 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
804 hdev->le_pkts = rp->le_max_pkt;
805
806 hdev->le_cnt = hdev->le_pkts;
807
808 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
809 }
810
811 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
812 struct sk_buff *skb)
813 {
814 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
815
816 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
817
818 if (!rp->status)
819 memcpy(hdev->le_features, rp->features, 8);
820 }
821
822 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
823 struct sk_buff *skb)
824 {
825 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
826
827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828
829 if (!rp->status)
830 hdev->adv_tx_power = rp->tx_power;
831 }
832
833 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
834 {
835 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
836
837 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
838
839 hci_dev_lock(hdev);
840
841 if (test_bit(HCI_MGMT, &hdev->dev_flags))
842 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
843 rp->status);
844
845 hci_dev_unlock(hdev);
846 }
847
848 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
849 struct sk_buff *skb)
850 {
851 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
852
853 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
854
855 hci_dev_lock(hdev);
856
857 if (test_bit(HCI_MGMT, &hdev->dev_flags))
858 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
859 ACL_LINK, 0, rp->status);
860
861 hci_dev_unlock(hdev);
862 }
863
864 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
865 {
866 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
867
868 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
869
870 hci_dev_lock(hdev);
871
872 if (test_bit(HCI_MGMT, &hdev->dev_flags))
873 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
874 0, rp->status);
875
876 hci_dev_unlock(hdev);
877 }
878
879 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
880 struct sk_buff *skb)
881 {
882 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
883
884 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
885
886 hci_dev_lock(hdev);
887
888 if (test_bit(HCI_MGMT, &hdev->dev_flags))
889 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
890 ACL_LINK, 0, rp->status);
891
892 hci_dev_unlock(hdev);
893 }
894
895 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
896 struct sk_buff *skb)
897 {
898 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
899
900 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
901
902 hci_dev_lock(hdev);
903 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
904 rp->randomizer, rp->status);
905 hci_dev_unlock(hdev);
906 }
907
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * On success the HCI_LE_PERIPHERAL flag tracks the advertising state.
 * Outside of controller init, the advertising data is refreshed via a
 * new request since the flag change can affect its contents.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	if (!test_bit(HCI_INIT, &hdev->flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_update_ad(&req);
		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);
}
937
938 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
939 {
940 __u8 status = *((__u8 *) skb->data);
941
942 BT_DBG("%s status 0x%2.2x", hdev->name, status);
943
944 if (status) {
945 hci_dev_lock(hdev);
946 mgmt_start_discovery_failed(hdev, status);
947 hci_dev_unlock(hdev);
948 return;
949 }
950 }
951
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Drives the discovery state machine: enabling the LE scan moves
 * discovery to FINDING; disabling it either hands over to the BR/EDR
 * phase of an interleaved discovery or stops discovery altogether.
 * Failures are reported to MGMT as start/stop discovery failures.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* An interleaved discovery still in FINDING continues
		 * with its BR/EDR inquiry phase; anything else is done.
		 */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1006
1007 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1008 struct sk_buff *skb)
1009 {
1010 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1011
1012 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1013
1014 if (!rp->status)
1015 hdev->le_white_list_size = rp->size;
1016 }
1017
1018 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1019 struct sk_buff *skb)
1020 {
1021 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1022
1023 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1024
1025 if (!rp->status)
1026 memcpy(hdev->le_states, rp->le_states, 8);
1027 }
1028
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, mirror the LE and simultaneous LE/BR-EDR host support
 * bits into feature page 1.  MGMT is notified of the outcome, but only
 * after controller init has finished.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->features[1][0] |= LMP_HOST_LE;
		else
			hdev->features[1][0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
		else
			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);
}
1057
1058 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1059 struct sk_buff *skb)
1060 {
1061 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1062
1063 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1064 hdev->name, rp->status, rp->phy_handle);
1065
1066 if (rp->status)
1067 return;
1068
1069 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1070 }
1071
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure, re-check queued connection attempts and report the
 * failed discovery start to MGMT.  On success, set HCI_INQUIRY and
 * move discovery to the FINDING state.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
1091
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure: a pending connection is torn down, except for status
 * 0x0c (Command Disallowed per the HCI error codes) on the first two
 * attempts, where the connection is parked in BT_CONNECT2 for a retry.
 * On success: a connection object is created if one doesn't exist yet,
 * marked outgoing with the master role.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1131
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling: look up the ACL link the SCO was being
 * added to and tear down its attached SCO connection.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1166
1167 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1168 {
1169 struct hci_cp_auth_requested *cp;
1170 struct hci_conn *conn;
1171
1172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1173
1174 if (!status)
1175 return;
1176
1177 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1178 if (!cp)
1179 return;
1180
1181 hci_dev_lock(hdev);
1182
1183 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1184 if (conn) {
1185 if (conn->state == BT_CONFIG) {
1186 hci_proto_connect_cfm(conn, status);
1187 hci_conn_drop(conn);
1188 }
1189 }
1190
1191 hci_dev_unlock(hdev);
1192 }
1193
1194 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1195 {
1196 struct hci_cp_set_conn_encrypt *cp;
1197 struct hci_conn *conn;
1198
1199 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1200
1201 if (!status)
1202 return;
1203
1204 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1205 if (!cp)
1206 return;
1207
1208 hci_dev_lock(hdev);
1209
1210 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1211 if (conn) {
1212 if (conn->state == BT_CONFIG) {
1213 hci_proto_connect_cfm(conn, status);
1214 hci_conn_drop(conn);
1215 }
1216 }
1217
1218 hci_dev_unlock(hdev);
1219 }
1220
1221 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1222 struct hci_conn *conn)
1223 {
1224 if (conn->state != BT_CONFIG || !conn->out)
1225 return 0;
1226
1227 if (conn->pending_sec_level == BT_SECURITY_SDP)
1228 return 0;
1229
1230 /* Only request authentication for SSP connections or non-SSP
1231 * devices with sec_level HIGH or if MITM protection is requested */
1232 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1233 conn->pending_sec_level != BT_SECURITY_HIGH)
1234 return 0;
1235
1236 return 1;
1237 }
1238
1239 static int hci_resolve_name(struct hci_dev *hdev,
1240 struct inquiry_entry *e)
1241 {
1242 struct hci_cp_remote_name_req cp;
1243
1244 memset(&cp, 0, sizeof(cp));
1245
1246 bacpy(&cp.bdaddr, &e->data.bdaddr);
1247 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1248 cp.pscan_mode = e->data.pscan_mode;
1249 cp.clock_offset = e->data.clock_offset;
1250
1251 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1252 }
1253
1254 static bool hci_resolve_next_name(struct hci_dev *hdev)
1255 {
1256 struct discovery_state *discov = &hdev->discovery;
1257 struct inquiry_entry *e;
1258
1259 if (list_empty(&discov->resolve))
1260 return false;
1261
1262 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1263 if (!e)
1264 return false;
1265
1266 if (hci_resolve_name(hdev, e) == 0) {
1267 e->name_state = NAME_PENDING;
1268 return true;
1269 }
1270
1271 return false;
1272 }
1273
/* Process the outcome of a name resolution attempt for @bdaddr and, if
 * discovery is active, advance (or finish) the name resolution phase.
 * @name is NULL when resolution failed; @conn may be NULL.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Announce the device to mgmt as connected exactly once, now that
	 * (possibly) a name is available.
	 */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* Discovery is being cancelled: report completion without
	 * resolving any further names.
	 */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Kick off resolution of the next NAME_NEEDED entry, if any. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1316
1317 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1318 {
1319 struct hci_cp_remote_name_req *cp;
1320 struct hci_conn *conn;
1321
1322 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1323
1324 /* If successful wait for the name req complete event before
1325 * checking for the need to do authentication */
1326 if (!status)
1327 return;
1328
1329 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1330 if (!cp)
1331 return;
1332
1333 hci_dev_lock(hdev);
1334
1335 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1336
1337 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1338 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1339
1340 if (!conn)
1341 goto unlock;
1342
1343 if (!hci_outgoing_auth_needed(hdev, conn))
1344 goto unlock;
1345
1346 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1347 struct hci_cp_auth_requested cp;
1348 cp.handle = __cpu_to_le16(conn->handle);
1349 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1350 }
1351
1352 unlock:
1353 hci_dev_unlock(hdev);
1354 }
1355
1356 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1357 {
1358 struct hci_cp_read_remote_features *cp;
1359 struct hci_conn *conn;
1360
1361 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1362
1363 if (!status)
1364 return;
1365
1366 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1367 if (!cp)
1368 return;
1369
1370 hci_dev_lock(hdev);
1371
1372 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1373 if (conn) {
1374 if (conn->state == BT_CONFIG) {
1375 hci_proto_connect_cfm(conn, status);
1376 hci_conn_drop(conn);
1377 }
1378 }
1379
1380 hci_dev_unlock(hdev);
1381 }
1382
1383 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1384 {
1385 struct hci_cp_read_remote_ext_features *cp;
1386 struct hci_conn *conn;
1387
1388 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1389
1390 if (!status)
1391 return;
1392
1393 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1394 if (!cp)
1395 return;
1396
1397 hci_dev_lock(hdev);
1398
1399 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1400 if (conn) {
1401 if (conn->state == BT_CONFIG) {
1402 hci_proto_connect_cfm(conn, status);
1403 hci_conn_drop(conn);
1404 }
1405 }
1406
1407 hci_dev_unlock(hdev);
1408 }
1409
1410 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1411 {
1412 struct hci_cp_setup_sync_conn *cp;
1413 struct hci_conn *acl, *sco;
1414 __u16 handle;
1415
1416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1417
1418 if (!status)
1419 return;
1420
1421 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1422 if (!cp)
1423 return;
1424
1425 handle = __le16_to_cpu(cp->handle);
1426
1427 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1428
1429 hci_dev_lock(hdev);
1430
1431 acl = hci_conn_hash_lookup_handle(hdev, handle);
1432 if (acl) {
1433 sco = acl->link;
1434 if (sco) {
1435 sco->state = BT_CLOSED;
1436
1437 hci_proto_connect_cfm(sco, status);
1438 hci_conn_del(sco);
1439 }
1440 }
1441
1442 hci_dev_unlock(hdev);
1443 }
1444
1445 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1446 {
1447 struct hci_cp_sniff_mode *cp;
1448 struct hci_conn *conn;
1449
1450 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1451
1452 if (!status)
1453 return;
1454
1455 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1456 if (!cp)
1457 return;
1458
1459 hci_dev_lock(hdev);
1460
1461 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1462 if (conn) {
1463 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1464
1465 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1466 hci_sco_setup(conn, status);
1467 }
1468
1469 hci_dev_unlock(hdev);
1470 }
1471
1472 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1473 {
1474 struct hci_cp_exit_sniff_mode *cp;
1475 struct hci_conn *conn;
1476
1477 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1478
1479 if (!status)
1480 return;
1481
1482 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1483 if (!cp)
1484 return;
1485
1486 hci_dev_lock(hdev);
1487
1488 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1489 if (conn) {
1490 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1491
1492 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1493 hci_sco_setup(conn, status);
1494 }
1495
1496 hci_dev_unlock(hdev);
1497 }
1498
1499 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1500 {
1501 struct hci_cp_disconnect *cp;
1502 struct hci_conn *conn;
1503
1504 if (!status)
1505 return;
1506
1507 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1508 if (!cp)
1509 return;
1510
1511 hci_dev_lock(hdev);
1512
1513 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1514 if (conn)
1515 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1516 conn->dst_type, status);
1517
1518 hci_dev_unlock(hdev);
1519 }
1520
1521 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1522 {
1523 struct hci_conn *conn;
1524
1525 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1526
1527 if (status) {
1528 hci_dev_lock(hdev);
1529
1530 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1531 if (!conn) {
1532 hci_dev_unlock(hdev);
1533 return;
1534 }
1535
1536 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1537
1538 conn->state = BT_CLOSED;
1539 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1540 conn->dst_type, status);
1541 hci_proto_connect_cfm(conn, status);
1542 hci_conn_del(conn);
1543
1544 hci_dev_unlock(hdev);
1545 }
1546 }
1547
1548 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1549 {
1550 struct hci_cp_create_phy_link *cp;
1551
1552 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1553
1554 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1555 if (!cp)
1556 return;
1557
1558 hci_dev_lock(hdev);
1559
1560 if (status) {
1561 struct hci_conn *hcon;
1562
1563 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1564 if (hcon)
1565 hci_conn_del(hcon);
1566 } else {
1567 amp_write_remote_assoc(hdev, cp->phy_handle);
1568 }
1569
1570 hci_dev_unlock(hdev);
1571 }
1572
1573 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1574 {
1575 struct hci_cp_accept_phy_link *cp;
1576
1577 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1578
1579 if (status)
1580 return;
1581
1582 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1583 if (!cp)
1584 return;
1585
1586 amp_write_remote_assoc(hdev, cp->phy_handle);
1587 }
1588
/* Handle the HCI Inquiry Complete event: wake up waiters on HCI_INQUIRY
 * and, for mgmt-driven discovery, either move on to name resolution or
 * mark discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Retry any connection attempts deferred while inquiry was active. */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state handling below applies to mgmt-driven discovery
	 * only, not to legacy ioctl-initiated inquiry.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names for found devices; discovery remains active
	 * in DISCOVERY_RESOLVING until name resolution finishes.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1629
1630 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1631 {
1632 struct inquiry_data data;
1633 struct inquiry_info *info = (void *) (skb->data + 1);
1634 int num_rsp = *((__u8 *) skb->data);
1635
1636 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1637
1638 if (!num_rsp)
1639 return;
1640
1641 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1642 return;
1643
1644 hci_dev_lock(hdev);
1645
1646 for (; num_rsp; num_rsp--, info++) {
1647 bool name_known, ssp;
1648
1649 bacpy(&data.bdaddr, &info->bdaddr);
1650 data.pscan_rep_mode = info->pscan_rep_mode;
1651 data.pscan_period_mode = info->pscan_period_mode;
1652 data.pscan_mode = info->pscan_mode;
1653 memcpy(data.dev_class, info->dev_class, 3);
1654 data.clock_offset = info->clock_offset;
1655 data.rssi = 0x00;
1656 data.ssp_mode = 0x00;
1657
1658 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1659 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1660 info->dev_class, 0, !name_known, ssp, NULL,
1661 0);
1662 }
1663
1664 hci_dev_unlock(hdev);
1665 }
1666
/* Handle the HCI Connection Complete event: finish setting up the
 * matching connection object, or tear it down on failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO attempt may have been downgraded to SCO by the
		 * controller; retry the lookup as ESCO_LINK and convert.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a stored
			 * link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	/* Kick any SCO setup that was waiting on this ACL link. */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	/* On failure the connection is deleted; on success only non-ACL
	 * links are confirmed here (ACL waits for feature/auth exchange).
	 */
	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1748
/* Handle the HCI Connection Request event: accept the incoming link
 * (immediately, or deferred to upper layers) or reject it when the
 * address is blacklisted or the link mode disallows it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto/defer the connection via mask/flags. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh cached device class from the request. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		/* ACL requests (and SCO on controllers without eSCO, unless
		 * deferred) are accepted with Accept Connection Request.
		 */
		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			/* eSCO-capable controller: accept with the
			 * synchronous variant and default parameters.
			 */
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency    = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Upper layer asked to defer: let it decide later. */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1830
1831 static u8 hci_to_mgmt_reason(u8 err)
1832 {
1833 switch (err) {
1834 case HCI_ERROR_CONNECTION_TIMEOUT:
1835 return MGMT_DEV_DISCONN_TIMEOUT;
1836 case HCI_ERROR_REMOTE_USER_TERM:
1837 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1838 case HCI_ERROR_REMOTE_POWER_OFF:
1839 return MGMT_DEV_DISCONN_REMOTE;
1840 case HCI_ERROR_LOCAL_HOST_TERM:
1841 return MGMT_DEV_DISCONN_LOCAL_HOST;
1842 default:
1843 return MGMT_DEV_DISCONN_UNKNOWN;
1844 }
1845 }
1846
/* Handle the HCI Disconnection Complete event: notify mgmt, clean up
 * link keys marked for flushing and delete the connection on success.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only links previously announced to mgmt generate disconnect
	 * notifications; the flag is cleared so this happens once.
	 */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		/* Drop one-shot link keys and tear down the connection. */
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1886
/* Handle the HCI Authentication Complete event: record the new security
 * level, continue connection setup (possibly by enabling encryption) and
 * service any pending encryption request.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* A legacy (non-SSP) link cannot be re-authenticated; keep
		 * the old security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* For SSP links still being set up, authentication is
		 * followed by enabling encryption before completing setup.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold with a fresh disconnect timeout, then drop the extra
		 * reference taken for this authentication round.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption change was queued behind this authentication. */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1952
/* Handle the HCI Remote Name Request Complete event: feed the result to
 * the discovery name-resolution logic and, for outgoing connections
 * still in setup, request authentication if needed.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Retry any connection attempts deferred behind the name request. */
	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name resolution bookkeeping only applies to mgmt-driven use. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1991
/* Handle the HCI Encryption Change event: update link mode and security
 * level, fail or complete connection setup, and notify upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* A failed encryption change on an established link is
		 * treated as an authentication failure: disconnect.
		 */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2034
2035 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2036 struct sk_buff *skb)
2037 {
2038 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2039 struct hci_conn *conn;
2040
2041 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2042
2043 hci_dev_lock(hdev);
2044
2045 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2046 if (conn) {
2047 if (!ev->status)
2048 conn->link_mode |= HCI_LM_SECURE;
2049
2050 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2051
2052 hci_key_change_cfm(conn, ev->status);
2053 }
2054
2055 hci_dev_unlock(hdev);
2056 }
2057
/* Handle the HCI Read Remote Supported Features Complete event: store
 * page 0 of the remote feature table and continue connection setup by
 * fetching extended features, resolving the name, or completing.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Page 0 of the per-connection feature table. */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support SSP, fetch extended features (page 1)
	 * before finishing setup.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the connection to
	 * mgmt; otherwise announce it now (exactly once).
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2107
/* Handle the HCI Command Complete event: dispatch the embedded return
 * parameters to the per-opcode hci_cc_* handler, complete the pending
 * request and restart command queue processing.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status. */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Command Complete means the controller answered, so stop the
	 * command timeout (HCI_OP_NOP completions are spontaneous).
	 */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd is the controller's remaining command quota; restart the
	 * command queue unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2343
/* Handle the HCI Command Status event: dispatch the status to the
 * per-opcode hci_cs_* handler, complete the pending request where
 * appropriate and restart command queue processing.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Command Status means the controller answered, so stop the
	 * command timeout (HCI_OP_NOP completions are spontaneous).
	 */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the request now only on failure, or when the sent
	 * command expects no further event to finish it.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd is the controller's remaining command quota; restart the
	 * command queue unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2432
2433 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2434 {
2435 struct hci_ev_role_change *ev = (void *) skb->data;
2436 struct hci_conn *conn;
2437
2438 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2439
2440 hci_dev_lock(hdev);
2441
2442 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2443 if (conn) {
2444 if (!ev->status) {
2445 if (ev->role)
2446 conn->link_mode &= ~HCI_LM_MASTER;
2447 else
2448 conn->link_mode |= HCI_LM_MASTER;
2449 }
2450
2451 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2452
2453 hci_role_switch_cfm(conn, ev->status, ev->role);
2454 }
2455
2456 hci_dev_unlock(hdev);
2457 }
2458
/* Handle the Number of Completed Packets event (packet-based flow
 * control): credit back the per-link and per-device packet budgets
 * and restart transmission.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* First check guarantees ev->num_hndl itself is readable
	 * before it is used to size the second check. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the right pool, clamping to the
		 * controller-advertised maximum in each case. */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_pkts == 0) share the ACL pool. */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2524
2525 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2526 __u16 handle)
2527 {
2528 struct hci_chan *chan;
2529
2530 switch (hdev->dev_type) {
2531 case HCI_BREDR:
2532 return hci_conn_hash_lookup_handle(hdev, handle);
2533 case HCI_AMP:
2534 chan = hci_chan_lookup_handle(hdev, handle);
2535 if (chan)
2536 return chan->conn;
2537 break;
2538 default:
2539 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2540 break;
2541 }
2542
2543 return NULL;
2544 }
2545
/* Handle the Number of Completed Data Blocks event (block-based flow
 * control): credit back the shared block budget and restart
 * transmission.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* First check guarantees ev->num_hndl itself is readable
	 * before it is used to size the second check. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* AMP-aware lookup: the handle may refer to a logical
		 * channel rather than a connection. */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp to the controller-advertised maximum */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2595
2596 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2597 {
2598 struct hci_ev_mode_change *ev = (void *) skb->data;
2599 struct hci_conn *conn;
2600
2601 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2602
2603 hci_dev_lock(hdev);
2604
2605 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2606 if (conn) {
2607 conn->mode = ev->mode;
2608 conn->interval = __le16_to_cpu(ev->interval);
2609
2610 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2611 &conn->flags)) {
2612 if (conn->mode == HCI_CM_ACTIVE)
2613 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2614 else
2615 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2616 }
2617
2618 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2619 hci_sco_setup(conn, ev->status);
2620 }
2621
2622 hci_dev_unlock(hdev);
2623 }
2624
2625 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2626 {
2627 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2628 struct hci_conn *conn;
2629
2630 BT_DBG("%s", hdev->name);
2631
2632 hci_dev_lock(hdev);
2633
2634 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2635 if (!conn)
2636 goto unlock;
2637
2638 if (conn->state == BT_CONNECTED) {
2639 hci_conn_hold(conn);
2640 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2641 hci_conn_drop(conn);
2642 }
2643
2644 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2645 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2646 sizeof(ev->bdaddr), &ev->bdaddr);
2647 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2648 u8 secure;
2649
2650 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2651 secure = 1;
2652 else
2653 secure = 0;
2654
2655 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2656 }
2657
2658 unlock:
2659 hci_dev_unlock(hdev);
2660 }
2661
/* Handle the HCI Link Key Request event: look up a stored key for the
 * peer, apply the key-type security policy, and reply with either the
 * key or a negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Without kernel-side link-key storage the controller resolves
	 * keys some other way; nothing to answer here. */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable while debug-key mode is on */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Refuse an unauthenticated key when the connection
		 * requires MITM protection (auth_type bit 0). */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN is too weak for
		 * high security. */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2724
2725 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2726 {
2727 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2728 struct hci_conn *conn;
2729 u8 pin_len = 0;
2730
2731 BT_DBG("%s", hdev->name);
2732
2733 hci_dev_lock(hdev);
2734
2735 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2736 if (conn) {
2737 hci_conn_hold(conn);
2738 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2739 pin_len = conn->pin_length;
2740
2741 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2742 conn->key_type = ev->key_type;
2743
2744 hci_conn_drop(conn);
2745 }
2746
2747 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2748 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2749 ev->key_type, pin_len);
2750
2751 hci_dev_unlock(hdev);
2752 }
2753
2754 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2755 {
2756 struct hci_ev_clock_offset *ev = (void *) skb->data;
2757 struct hci_conn *conn;
2758
2759 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2760
2761 hci_dev_lock(hdev);
2762
2763 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2764 if (conn && !ev->status) {
2765 struct inquiry_entry *ie;
2766
2767 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2768 if (ie) {
2769 ie->data.clock_offset = ev->clock_offset;
2770 ie->timestamp = jiffies;
2771 }
2772 }
2773
2774 hci_dev_unlock(hdev);
2775 }
2776
2777 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2778 {
2779 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2780 struct hci_conn *conn;
2781
2782 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2783
2784 hci_dev_lock(hdev);
2785
2786 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2787 if (conn && !ev->status)
2788 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2789
2790 hci_dev_unlock(hdev);
2791 }
2792
2793 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2794 {
2795 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2796 struct inquiry_entry *ie;
2797
2798 BT_DBG("%s", hdev->name);
2799
2800 hci_dev_lock(hdev);
2801
2802 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2803 if (ie) {
2804 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2805 ie->timestamp = jiffies;
2806 }
2807
2808 hci_dev_unlock(hdev);
2809 }
2810
/* Handle the Inquiry Result with RSSI event. Two on-air layouts exist
 * (with or without a pscan_mode byte); the actual one is detected from
 * the per-response size. Each response updates the inquiry cache and is
 * reported to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Distinguish the two response formats by per-entry size */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2870
2871 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2872 struct sk_buff *skb)
2873 {
2874 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2875 struct hci_conn *conn;
2876
2877 BT_DBG("%s", hdev->name);
2878
2879 hci_dev_lock(hdev);
2880
2881 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2882 if (!conn)
2883 goto unlock;
2884
2885 if (ev->page < HCI_MAX_PAGES)
2886 memcpy(conn->features[ev->page], ev->features, 8);
2887
2888 if (!ev->status && ev->page == 0x01) {
2889 struct inquiry_entry *ie;
2890
2891 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2892 if (ie)
2893 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2894
2895 if (ev->features[0] & LMP_HOST_SSP) {
2896 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2897 } else {
2898 /* It is mandatory by the Bluetooth specification that
2899 * Extended Inquiry Results are only used when Secure
2900 * Simple Pairing is enabled, but some devices violate
2901 * this.
2902 *
2903 * To make these devices work, the internal SSP
2904 * enabled flag needs to be cleared if the remote host
2905 * features do not indicate SSP support */
2906 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2907 }
2908 }
2909
2910 if (conn->state != BT_CONFIG)
2911 goto unlock;
2912
2913 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2914 struct hci_cp_remote_name_req cp;
2915 memset(&cp, 0, sizeof(cp));
2916 bacpy(&cp.bdaddr, &conn->dst);
2917 cp.pscan_rep_mode = 0x02;
2918 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2919 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2920 mgmt_device_connected(hdev, &conn->dst, conn->type,
2921 conn->dst_type, 0, NULL, 0,
2922 conn->dev_class);
2923
2924 if (!hci_outgoing_auth_needed(hdev, conn)) {
2925 conn->state = BT_CONNECTED;
2926 hci_proto_connect_cfm(conn, ev->status);
2927 hci_conn_drop(conn);
2928 }
2929
2930 unlock:
2931 hci_dev_unlock(hdev);
2932 }
2933
/* Handle the Synchronous Connection Complete event for SCO/eSCO links,
 * including the fallback retry from eSCO parameters to plain SCO when
 * the remote rejects the first attempt.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO attempt may have been downgraded to SCO by
		 * the controller; retry the lookup as eSCO and fix up
		 * the connection type. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* One retry with a reduced (non-EDR-only) packet type
		 * set before giving up on an outgoing attempt. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2988
/* Handle the Extended Inquiry Result event: update the inquiry cache
 * for each response and report the device together with its EIR data
 * to mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		/* EIR implies the remote advertised SSP support */
		data.ssp_mode = 0x01;

		/* With mgmt active, a name request can be skipped when
		 * the EIR data already carries the complete name. */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
3036
/* Handle the Encryption Key Refresh Complete event: promote the
 * security level on success, tear the link down on failure, and
 * complete a pending connection or authentication.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed key refresh on a live link is treated as an
	 * authentication failure and the link is disconnected. */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3080
3081 static u8 hci_get_auth_req(struct hci_conn *conn)
3082 {
3083 /* If remote requests dedicated bonding follow that lead */
3084 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3085 /* If both remote and local IO capabilities allow MITM
3086 * protection then require it, otherwise don't */
3087 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3088 return 0x02;
3089 else
3090 return 0x03;
3091 }
3092
3093 /* If remote requests no-bonding follow that lead */
3094 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3095 return conn->remote_auth | (conn->auth_type & 0x01);
3096
3097 return conn->auth_type;
3098 }
3099
/* Handle the IO Capability Request event: reply with our capabilities
 * when pairing is acceptable, otherwise send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept when we are pairable, or when the remote only wants
	 * no-bonding (possibly with MITM). */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
						0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we hold it for the peer
		 * and it is usable for this pairing direction. */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3151
3152 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3153 {
3154 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3155 struct hci_conn *conn;
3156
3157 BT_DBG("%s", hdev->name);
3158
3159 hci_dev_lock(hdev);
3160
3161 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3162 if (!conn)
3163 goto unlock;
3164
3165 conn->remote_cap = ev->capability;
3166 conn->remote_auth = ev->authentication;
3167 if (ev->oob_data)
3168 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3169
3170 unlock:
3171 hci_dev_unlock(hdev);
3172 }
3173
/* Handle the User Confirmation Request event: auto-reject when MITM
 * cannot be satisfied, auto-accept (possibly delayed) when no side
 * needs MITM, otherwise hand the decision to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept via the
		 * connection's auto-accept timer. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3241
3242 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3243 struct sk_buff *skb)
3244 {
3245 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3246
3247 BT_DBG("%s", hdev->name);
3248
3249 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3250 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3251 }
3252
3253 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3254 struct sk_buff *skb)
3255 {
3256 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3257 struct hci_conn *conn;
3258
3259 BT_DBG("%s", hdev->name);
3260
3261 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3262 if (!conn)
3263 return;
3264
3265 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3266 conn->passkey_entered = 0;
3267
3268 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3269 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3270 conn->dst_type, conn->passkey_notify,
3271 conn->passkey_entered);
3272 }
3273
3274 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3275 {
3276 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3277 struct hci_conn *conn;
3278
3279 BT_DBG("%s", hdev->name);
3280
3281 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3282 if (!conn)
3283 return;
3284
3285 switch (ev->type) {
3286 case HCI_KEYPRESS_STARTED:
3287 conn->passkey_entered = 0;
3288 return;
3289
3290 case HCI_KEYPRESS_ENTERED:
3291 conn->passkey_entered++;
3292 break;
3293
3294 case HCI_KEYPRESS_ERASED:
3295 conn->passkey_entered--;
3296 break;
3297
3298 case HCI_KEYPRESS_CLEARED:
3299 conn->passkey_entered = 0;
3300 break;
3301
3302 case HCI_KEYPRESS_COMPLETED:
3303 return;
3304 }
3305
3306 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3307 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3308 conn->dst_type, conn->passkey_notify,
3309 conn->passkey_entered);
3310 }
3311
3312 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3313 struct sk_buff *skb)
3314 {
3315 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3316 struct hci_conn *conn;
3317
3318 BT_DBG("%s", hdev->name);
3319
3320 hci_dev_lock(hdev);
3321
3322 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3323 if (!conn)
3324 goto unlock;
3325
3326 /* To avoid duplicate auth_failed events to user space we check
3327 * the HCI_CONN_AUTH_PEND flag which will be set if we
3328 * initiated the authentication. A traditional auth_complete
3329 * event gets always produced as initiator and is also mapped to
3330 * the mgmt_auth_failed event */
3331 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3332 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3333 ev->status);
3334
3335 hci_conn_drop(conn);
3336
3337 unlock:
3338 hci_dev_unlock(hdev);
3339 }
3340
/* Handle the Remote Host Supported Features Notification event: store
 * the host features as page 1 of the connection's feature table and
 * sync the cached SSP mode in the inquiry cache.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		/* Host features occupy page 1 of the feature table */
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
3362
3363 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3364 struct sk_buff *skb)
3365 {
3366 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3367 struct oob_data *data;
3368
3369 BT_DBG("%s", hdev->name);
3370
3371 hci_dev_lock(hdev);
3372
3373 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3374 goto unlock;
3375
3376 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3377 if (data) {
3378 struct hci_cp_remote_oob_data_reply cp;
3379
3380 bacpy(&cp.bdaddr, &ev->bdaddr);
3381 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3382 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3383
3384 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3385 &cp);
3386 } else {
3387 struct hci_cp_remote_oob_data_neg_reply cp;
3388
3389 bacpy(&cp.bdaddr, &ev->bdaddr);
3390 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3391 &cp);
3392 }
3393
3394 unlock:
3395 hci_dev_unlock(hdev);
3396 }
3397
/* Handle the AMP Physical Link Complete event: finish bringing up the
 * AMP connection and confirm it to the AMP manager, or delete the
 * connection on failure.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The companion BR/EDR link that requested this AMP link */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Re-arm the disconnect timer on the now-live link */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
3436
/* Handle the AMP Logical Link Complete event: create the HCI channel
 * for the logical link and, when an L2CAP channel is waiting on the
 * AMP manager, confirm the logical link to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Data now flows over the AMP; use the block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3474
3475 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3476 struct sk_buff *skb)
3477 {
3478 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3479 struct hci_chan *hchan;
3480
3481 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3482 le16_to_cpu(ev->handle), ev->status);
3483
3484 if (ev->status)
3485 return;
3486
3487 hci_dev_lock(hdev);
3488
3489 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3490 if (!hchan)
3491 goto unlock;
3492
3493 amp_destroy_logical_link(hchan, ev->reason);
3494
3495 unlock:
3496 hci_dev_unlock(hdev);
3497 }
3498
3499 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3500 struct sk_buff *skb)
3501 {
3502 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3503 struct hci_conn *hcon;
3504
3505 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3506
3507 if (ev->status)
3508 return;
3509
3510 hci_dev_lock(hdev);
3511
3512 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3513 if (hcon) {
3514 hcon->state = BT_CLOSED;
3515 hci_conn_del(hcon);
3516 }
3517
3518 hci_dev_unlock(hdev);
3519 }
3520
/* Handle the LE Connection Complete event: attach the new link to a
 * pending LE connection object (creating one for incoming links),
 * then report success or failure up the stack and to mgmt.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		/* No outgoing attempt pending: this is an incoming
		 * connection, so create the object for it. */
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3570
3571 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3572 {
3573 u8 num_reports = skb->data[0];
3574 void *ptr = &skb->data[1];
3575 s8 rssi;
3576
3577 while (num_reports--) {
3578 struct hci_ev_le_advertising_info *ev = ptr;
3579
3580 rssi = ev->data[ev->length];
3581 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3582 NULL, rssi, 0, 1, ev->data, ev->length);
3583
3584 ptr += sizeof(*ev) + ev->length + 1;
3585 }
3586 }
3587
3588 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3589 {
3590 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3591 struct hci_cp_le_ltk_reply cp;
3592 struct hci_cp_le_ltk_neg_reply neg;
3593 struct hci_conn *conn;
3594 struct smp_ltk *ltk;
3595
3596 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3597
3598 hci_dev_lock(hdev);
3599
3600 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3601 if (conn == NULL)
3602 goto not_found;
3603
3604 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3605 if (ltk == NULL)
3606 goto not_found;
3607
3608 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3609 cp.handle = cpu_to_le16(conn->handle);
3610
3611 if (ltk->authenticated)
3612 conn->sec_level = BT_SECURITY_HIGH;
3613
3614 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3615
3616 if (ltk->type & HCI_SMP_STK) {
3617 list_del(&ltk->list);
3618 kfree(ltk);
3619 }
3620
3621 hci_dev_unlock(hdev);
3622
3623 return;
3624
3625 not_found:
3626 neg.handle = ev->handle;
3627 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3628 hci_dev_unlock(hdev);
3629 }
3630
3631 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3632 {
3633 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3634
3635 skb_pull(skb, sizeof(*le_ev));
3636
3637 switch (le_ev->subevent) {
3638 case HCI_EV_LE_CONN_COMPLETE:
3639 hci_le_conn_complete_evt(hdev, skb);
3640 break;
3641
3642 case HCI_EV_LE_ADVERTISING_REPORT:
3643 hci_le_adv_report_evt(hdev, skb);
3644 break;
3645
3646 case HCI_EV_LE_LTK_REQ:
3647 hci_le_ltk_request_evt(hdev, skb);
3648 break;
3649
3650 default:
3651 break;
3652 }
3653 }
3654
3655 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3656 {
3657 struct hci_ev_channel_selected *ev = (void *) skb->data;
3658 struct hci_conn *hcon;
3659
3660 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3661
3662 skb_pull(skb, sizeof(*ev));
3663
3664 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3665 if (!hcon)
3666 return;
3667
3668 amp_read_loc_assoc_final_data(hdev, hcon);
3669 }
3670
/* Main HCI event dispatcher: reads the event header, does request
 * bookkeeping, then hands the payload (header already stripped) to the
 * handler matching the event code. Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for exactly this event,
	 * complete the pending request now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown/unhandled events are logged and dropped */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}