Bluetooth: Fix HCI request framework
net/bluetooth/hci_event.c (mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
37 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
38 {
39 __u8 status = *((__u8 *) skb->data);
40
41 BT_DBG("%s status 0x%2.2x", hdev->name, status);
42
43 if (status) {
44 hci_dev_lock(hdev);
45 mgmt_stop_discovery_failed(hdev, status);
46 hci_dev_unlock(hdev);
47 return;
48 }
49
50 clear_bit(HCI_INQUIRY, &hdev->flags);
51
52 hci_dev_lock(hdev);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54 hci_dev_unlock(hdev);
55
56 hci_conn_check_pending(hdev);
57 }
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
87 {
88 BT_DBG("%s", hdev->name);
89 }
90
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
95
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98 if (rp->status)
99 return;
100
101 hci_dev_lock(hdev);
102
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn) {
105 if (rp->role)
106 conn->link_mode &= ~HCI_LM_MASTER;
107 else
108 conn->link_mode |= HCI_LM_MASTER;
109 }
110
111 hci_dev_unlock(hdev);
112 }
113
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 {
116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 struct hci_conn *conn;
118
119 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120
121 if (rp->status)
122 return;
123
124 hci_dev_lock(hdev);
125
126 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 if (conn)
128 conn->link_policy = __le16_to_cpu(rp->policy);
129
130 hci_dev_unlock(hdev);
131 }
132
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
134 {
135 struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 struct hci_conn *conn;
137 void *sent;
138
139 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
140
141 if (rp->status)
142 return;
143
144 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 if (!sent)
146 return;
147
148 hci_dev_lock(hdev);
149
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 if (conn)
152 conn->link_policy = get_unaligned_le16(sent + 2);
153
154 hci_dev_unlock(hdev);
155 }
156
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
159 {
160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161
162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163
164 if (rp->status)
165 return;
166
167 hdev->link_policy = __le16_to_cpu(rp->policy);
168 }
169
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
172 {
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
177
178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 if (!sent)
180 return;
181
182 if (!status)
183 hdev->link_policy = get_unaligned_le16(sent);
184 }
185
186 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
187 {
188 __u8 status = *((__u8 *) skb->data);
189
190 BT_DBG("%s status 0x%2.2x", hdev->name, status);
191
192 clear_bit(HCI_RESET, &hdev->flags);
193
194 /* Reset all non-persistent flags */
195 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
196
197 hdev->discovery.state = DISCOVERY_STOPPED;
198 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
199 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
200
201 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
202 hdev->adv_data_len = 0;
203 }
204
205 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206 {
207 __u8 status = *((__u8 *) skb->data);
208 void *sent;
209
210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
211
212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (!sent)
214 return;
215
216 hci_dev_lock(hdev);
217
218 if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 mgmt_set_local_name_complete(hdev, sent, status);
220 else if (!status)
221 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222
223 hci_dev_unlock(hdev);
224 }
225
226 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
227 {
228 struct hci_rp_read_local_name *rp = (void *) skb->data;
229
230 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
231
232 if (rp->status)
233 return;
234
235 if (test_bit(HCI_SETUP, &hdev->dev_flags))
236 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
237 }
238
239 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
240 {
241 __u8 status = *((__u8 *) skb->data);
242 void *sent;
243
244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
245
246 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
247 if (!sent)
248 return;
249
250 if (!status) {
251 __u8 param = *((__u8 *) sent);
252
253 if (param == AUTH_ENABLED)
254 set_bit(HCI_AUTH, &hdev->flags);
255 else
256 clear_bit(HCI_AUTH, &hdev->flags);
257 }
258
259 if (test_bit(HCI_MGMT, &hdev->dev_flags))
260 mgmt_auth_enable_complete(hdev, status);
261 }
262
263 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 {
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
267
268 BT_DBG("%s status 0x%2.2x", hdev->name, status);
269
270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 if (!sent)
272 return;
273
274 if (!status) {
275 __u8 param = *((__u8 *) sent);
276
277 if (param)
278 set_bit(HCI_ENCRYPT, &hdev->flags);
279 else
280 clear_bit(HCI_ENCRYPT, &hdev->flags);
281 }
282 }
283
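/* Command Complete handler for HCI_Write_Scan_Enable: on success, update
 * the HCI_ISCAN/HCI_PSCAN flags from the scan mode that was requested and
 * notify mgmt of discoverable/connectable changes (arming the discoverable
 * timeout when inquiry scan is enabled); on failure, report the error to
 * mgmt and clear the discoverable timeout.
 */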
284 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
285 {
286 __u8 param, status = *((__u8 *) skb->data);
287 int old_pscan, old_iscan;
288 void *sent;
289
290 BT_DBG("%s status 0x%2.2x", hdev->name, status);
291
292 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
293 if (!sent)
294 return;
295
296 param = *((__u8 *) sent);
297
298 hci_dev_lock(hdev);
299
300 if (status) {
301 mgmt_write_scan_failed(hdev, param, status);
302 hdev->discov_timeout = 0;
303 goto done;
304 }
305
306 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
307 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
308
309 if (param & SCAN_INQUIRY) {
310 set_bit(HCI_ISCAN, &hdev->flags);
311 if (!old_iscan)
312 mgmt_discoverable(hdev, 1);
313 if (hdev->discov_timeout > 0) {
314 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
315 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
316 to);
317 }
318 } else if (old_iscan)
319 mgmt_discoverable(hdev, 0);
320
321 if (param & SCAN_PAGE) {
322 set_bit(HCI_PSCAN, &hdev->flags);
323 if (!old_pscan)
324 mgmt_connectable(hdev, 1);
325 } else if (old_pscan)
326 mgmt_connectable(hdev, 0);
327
328 done:
329 hci_dev_unlock(hdev);
330 }
331
332 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333 {
334 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335
336 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
337
338 if (rp->status)
339 return;
340
341 memcpy(hdev->dev_class, rp->dev_class, 3);
342
343 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345 }
346
347 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348 {
349 __u8 status = *((__u8 *) skb->data);
350 void *sent;
351
352 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353
354 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
355 if (!sent)
356 return;
357
358 hci_dev_lock(hdev);
359
360 if (status == 0)
361 memcpy(hdev->dev_class, sent, 3);
362
363 if (test_bit(HCI_MGMT, &hdev->dev_flags))
364 mgmt_set_class_of_dev_complete(hdev, sent, status);
365
366 hci_dev_unlock(hdev);
367 }
368
369 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370 {
371 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
372 __u16 setting;
373
374 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
375
376 if (rp->status)
377 return;
378
379 setting = __le16_to_cpu(rp->voice_setting);
380
381 if (hdev->voice_setting == setting)
382 return;
383
384 hdev->voice_setting = setting;
385
386 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387
388 if (hdev->notify)
389 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390 }
391
392 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
393 struct sk_buff *skb)
394 {
395 __u8 status = *((__u8 *) skb->data);
396 __u16 setting;
397 void *sent;
398
399 BT_DBG("%s status 0x%2.2x", hdev->name, status);
400
401 if (status)
402 return;
403
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
405 if (!sent)
406 return;
407
408 setting = get_unaligned_le16(sent);
409
410 if (hdev->voice_setting == setting)
411 return;
412
413 hdev->voice_setting = setting;
414
415 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416
417 if (hdev->notify)
418 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
419 }
420
421 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
422 {
423 __u8 status = *((__u8 *) skb->data);
424 struct hci_cp_write_ssp_mode *sent;
425
426 BT_DBG("%s status 0x%2.2x", hdev->name, status);
427
428 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
429 if (!sent)
430 return;
431
432 if (!status) {
433 if (sent->mode)
434 hdev->host_features[0] |= LMP_HOST_SSP;
435 else
436 hdev->host_features[0] &= ~LMP_HOST_SSP;
437 }
438
439 if (test_bit(HCI_MGMT, &hdev->dev_flags))
440 mgmt_ssp_enable_complete(hdev, sent->mode, status);
441 else if (!status) {
442 if (sent->mode)
443 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
444 else
445 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
446 }
447 }
448
449 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
450 {
451 struct hci_rp_read_local_version *rp = (void *) skb->data;
452
453 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
454
455 if (rp->status)
456 return;
457
458 hdev->hci_ver = rp->hci_ver;
459 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
460 hdev->lmp_ver = rp->lmp_ver;
461 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
462 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
463
464 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
465 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
466 }
467
468 static void hci_cc_read_local_commands(struct hci_dev *hdev,
469 struct sk_buff *skb)
470 {
471 struct hci_rp_read_local_commands *rp = (void *) skb->data;
472
473 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
474
475 if (!rp->status)
476 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
477 }
478
479 static void hci_cc_read_local_features(struct hci_dev *hdev,
480 struct sk_buff *skb)
481 {
482 struct hci_rp_read_local_features *rp = (void *) skb->data;
483
484 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
485
486 if (rp->status)
487 return;
488
489 memcpy(hdev->features, rp->features, 8);
490
491 /* Adjust the default settings according to the features
492 * supported by the device. */
493
494 if (hdev->features[0] & LMP_3SLOT)
495 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
496
497 if (hdev->features[0] & LMP_5SLOT)
498 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
499
500 if (hdev->features[1] & LMP_HV2) {
501 hdev->pkt_type |= (HCI_HV2);
502 hdev->esco_type |= (ESCO_HV2);
503 }
504
505 if (hdev->features[1] & LMP_HV3) {
506 hdev->pkt_type |= (HCI_HV3);
507 hdev->esco_type |= (ESCO_HV3);
508 }
509
510 if (lmp_esco_capable(hdev))
511 hdev->esco_type |= (ESCO_EV3);
512
513 if (hdev->features[4] & LMP_EV4)
514 hdev->esco_type |= (ESCO_EV4);
515
516 if (hdev->features[4] & LMP_EV5)
517 hdev->esco_type |= (ESCO_EV5);
518
519 if (hdev->features[5] & LMP_EDR_ESCO_2M)
520 hdev->esco_type |= (ESCO_2EV3);
521
522 if (hdev->features[5] & LMP_EDR_ESCO_3M)
523 hdev->esco_type |= (ESCO_3EV3);
524
525 if (hdev->features[5] & LMP_EDR_3S_ESCO)
526 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
527
528 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
529 hdev->features[0], hdev->features[1],
530 hdev->features[2], hdev->features[3],
531 hdev->features[4], hdev->features[5],
532 hdev->features[6], hdev->features[7]);
533 }
534
535 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
536 struct sk_buff *skb)
537 {
538 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
539
540 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
541
542 if (rp->status)
543 return;
544
545 switch (rp->page) {
546 case 0:
547 memcpy(hdev->features, rp->features, 8);
548 break;
549 case 1:
550 memcpy(hdev->host_features, rp->features, 8);
551 break;
552 }
553 }
554
555 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
556 struct sk_buff *skb)
557 {
558 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
559
560 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
561
562 if (!rp->status)
563 hdev->flow_ctl_mode = rp->mode;
564 }
565
566 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
567 {
568 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
569
570 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
571
572 if (rp->status)
573 return;
574
575 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
576 hdev->sco_mtu = rp->sco_mtu;
577 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
578 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
579
580 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
581 hdev->sco_mtu = 64;
582 hdev->sco_pkts = 8;
583 }
584
585 hdev->acl_cnt = hdev->acl_pkts;
586 hdev->sco_cnt = hdev->sco_pkts;
587
588 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
589 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
590 }
591
592 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
593 {
594 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
595
596 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
597
598 if (!rp->status)
599 bacpy(&hdev->bdaddr, &rp->bdaddr);
600 }
601
602 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
603 struct sk_buff *skb)
604 {
605 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
606
607 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
608
609 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
610 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
611 hdev->page_scan_window = __le16_to_cpu(rp->window);
612 }
613 }
614
615 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
616 struct sk_buff *skb)
617 {
618 u8 status = *((u8 *) skb->data);
619 struct hci_cp_write_page_scan_activity *sent;
620
621 BT_DBG("%s status 0x%2.2x", hdev->name, status);
622
623 if (status)
624 return;
625
626 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
627 if (!sent)
628 return;
629
630 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
631 hdev->page_scan_window = __le16_to_cpu(sent->window);
632 }
633
634 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
635 struct sk_buff *skb)
636 {
637 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
638
639 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
640
641 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
642 hdev->page_scan_type = rp->type;
643 }
644
645 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
646 struct sk_buff *skb)
647 {
648 u8 status = *((u8 *) skb->data);
649 u8 *type;
650
651 BT_DBG("%s status 0x%2.2x", hdev->name, status);
652
653 if (status)
654 return;
655
656 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
657 if (type)
658 hdev->page_scan_type = *type;
659 }
660
661 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
662 struct sk_buff *skb)
663 {
664 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
665
666 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
667
668 if (rp->status)
669 return;
670
671 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
672 hdev->block_len = __le16_to_cpu(rp->block_len);
673 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
674
675 hdev->block_cnt = hdev->num_blocks;
676
677 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
678 hdev->block_cnt, hdev->block_len);
679 }
680
681 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
682 struct sk_buff *skb)
683 {
684 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
685
686 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
687
688 if (rp->status)
689 goto a2mp_rsp;
690
691 hdev->amp_status = rp->amp_status;
692 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
693 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
694 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
695 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
696 hdev->amp_type = rp->amp_type;
697 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
698 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
699 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
700 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
701
702 a2mp_rsp:
703 a2mp_send_getinfo_rsp(hdev);
704 }
705
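/* Command Complete handler for Read Local AMP Assoc: the AMP association
 * may arrive in fragments, so each fragment is appended to hdev->loc_assoc
 * and the next one is requested until the remaining length fits in the
 * current fragment; the complete association is then handed to the A2MP
 * layer.
 */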
706 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
707 struct sk_buff *skb)
708 {
709 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
710 struct amp_assoc *assoc = &hdev->loc_assoc;
711 size_t rem_len, frag_len;
712
713 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
714
715 if (rp->status)
716 goto a2mp_rsp;
717
718 frag_len = skb->len - sizeof(*rp);
719 rem_len = __le16_to_cpu(rp->rem_len);
720
721 if (rem_len > frag_len) {
722 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
723
724 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
725 assoc->offset += frag_len;
726
727 /* Read other fragments */
728 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
729
730 return;
731 }
732
733 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
734 assoc->len = assoc->offset + rem_len;
735 assoc->offset = 0;
736
737 a2mp_rsp:
738 /* Send A2MP Rsp when all fragments are received */
739 a2mp_send_getampassoc_rsp(hdev, rp->status);
740 a2mp_send_create_phy_link_req(hdev, rp->status);
741 }
742
743 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
744 struct sk_buff *skb)
745 {
746 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
747
748 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
749
750 if (!rp->status)
751 hdev->inq_tx_power = rp->tx_power;
752 }
753
754 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
755 {
756 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
757 struct hci_cp_pin_code_reply *cp;
758 struct hci_conn *conn;
759
760 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
761
762 hci_dev_lock(hdev);
763
764 if (test_bit(HCI_MGMT, &hdev->dev_flags))
765 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
766
767 if (rp->status)
768 goto unlock;
769
770 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
771 if (!cp)
772 goto unlock;
773
774 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
775 if (conn)
776 conn->pin_length = cp->pin_len;
777
778 unlock:
779 hci_dev_unlock(hdev);
780 }
781
782 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
783 {
784 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
785
786 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
787
788 hci_dev_lock(hdev);
789
790 if (test_bit(HCI_MGMT, &hdev->dev_flags))
791 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
792 rp->status);
793
794 hci_dev_unlock(hdev);
795 }
796
797 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
798 struct sk_buff *skb)
799 {
800 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
801
802 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
803
804 if (rp->status)
805 return;
806
807 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
808 hdev->le_pkts = rp->le_max_pkt;
809
810 hdev->le_cnt = hdev->le_pkts;
811
812 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
813 }
814
815 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
816 struct sk_buff *skb)
817 {
818 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
819
820 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
821
822 if (!rp->status)
823 memcpy(hdev->le_features, rp->features, 8);
824 }
825
826 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
827 struct sk_buff *skb)
828 {
829 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
830
831 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832
833 if (!rp->status)
834 hdev->adv_tx_power = rp->tx_power;
835 }
836
837 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
838 {
839 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
840
841 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
842
843 hci_dev_lock(hdev);
844
845 if (test_bit(HCI_MGMT, &hdev->dev_flags))
846 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
847 rp->status);
848
849 hci_dev_unlock(hdev);
850 }
851
852 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
853 struct sk_buff *skb)
854 {
855 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
856
857 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
858
859 hci_dev_lock(hdev);
860
861 if (test_bit(HCI_MGMT, &hdev->dev_flags))
862 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
863 ACL_LINK, 0, rp->status);
864
865 hci_dev_unlock(hdev);
866 }
867
868 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
869 {
870 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
871
872 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
873
874 hci_dev_lock(hdev);
875
876 if (test_bit(HCI_MGMT, &hdev->dev_flags))
877 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
878 0, rp->status);
879
880 hci_dev_unlock(hdev);
881 }
882
883 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
884 struct sk_buff *skb)
885 {
886 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
887
888 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
889
890 hci_dev_lock(hdev);
891
892 if (test_bit(HCI_MGMT, &hdev->dev_flags))
893 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
894 ACL_LINK, 0, rp->status);
895
896 hci_dev_unlock(hdev);
897 }
898
899 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
900 struct sk_buff *skb)
901 {
902 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
903
904 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
905
906 hci_dev_lock(hdev);
907 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
908 rp->randomizer, rp->status);
909 hci_dev_unlock(hdev);
910 }
911
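/* Command Complete handler for LE Set Advertise Enable: track the
 * HCI_LE_PERIPHERAL flag based on the value that was sent and, outside of
 * controller init, refresh the advertising data via an HCI request.
 */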
912 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
913 {
914 __u8 *sent, status = *((__u8 *) skb->data);
915
916 BT_DBG("%s status 0x%2.2x", hdev->name, status);
917
918 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
919 if (!sent)
920 return;
921
922 hci_dev_lock(hdev);
923
924 if (!status) {
925 if (*sent)
926 set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
927 else
928 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
929 }
930
931 if (!test_bit(HCI_INIT, &hdev->flags)) {
932 struct hci_request req;
933
934 hci_req_init(&req, hdev);
935 hci_update_ad(&req);
936 hci_req_run(&req, NULL);
937 }
938
939 hci_dev_unlock(hdev);
940 }
941
942 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
943 {
944 __u8 status = *((__u8 *) skb->data);
945
946 BT_DBG("%s status 0x%2.2x", hdev->name, status);
947
948 if (status) {
949 hci_dev_lock(hdev);
950 mgmt_start_discovery_failed(hdev, status);
951 hci_dev_unlock(hdev);
952 return;
953 }
954 }
955
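/* Command Complete handler for LE Set Scan Enable: update HCI_LE_SCAN and
 * the discovery state depending on whether scanning was being enabled or
 * disabled, reporting start/stop failures to mgmt.
 */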
956 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
957 struct sk_buff *skb)
958 {
959 struct hci_cp_le_set_scan_enable *cp;
960 __u8 status = *((__u8 *) skb->data);
961
962 BT_DBG("%s status 0x%2.2x", hdev->name, status);
963
964 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
965 if (!cp)
966 return;
967
968 switch (cp->enable) {
969 case LE_SCANNING_ENABLED:
970 if (status) {
971 hci_dev_lock(hdev);
972 mgmt_start_discovery_failed(hdev, status);
973 hci_dev_unlock(hdev);
974 return;
975 }
976
977 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
978
979 hci_dev_lock(hdev);
980 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
981 hci_dev_unlock(hdev);
982 break;
983
984 case LE_SCANNING_DISABLED:
985 if (status) {
986 hci_dev_lock(hdev);
987 mgmt_stop_discovery_failed(hdev, status);
988 hci_dev_unlock(hdev);
989 return;
990 }
991
992 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
993
994 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
995 hdev->discovery.state == DISCOVERY_FINDING) {
996 mgmt_interleaved_discovery(hdev);
997 } else {
998 hci_dev_lock(hdev);
999 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1000 hci_dev_unlock(hdev);
1001 }
1002
1003 break;
1004
1005 default:
1006 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1007 break;
1008 }
1009 }
1010
1011 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1012 struct sk_buff *skb)
1013 {
1014 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1015
1016 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1017
1018 if (!rp->status)
1019 hdev->le_white_list_size = rp->size;
1020 }
1021
1022 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1023 struct sk_buff *skb)
1024 {
1025 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1026
1027 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1028
1029 if (!rp->status)
1030 memcpy(hdev->le_states, rp->le_states, 8);
1031 }
1032
1033 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1034 struct sk_buff *skb)
1035 {
1036 struct hci_cp_write_le_host_supported *sent;
1037 __u8 status = *((__u8 *) skb->data);
1038
1039 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1040
1041 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1042 if (!sent)
1043 return;
1044
1045 if (!status) {
1046 if (sent->le)
1047 hdev->host_features[0] |= LMP_HOST_LE;
1048 else
1049 hdev->host_features[0] &= ~LMP_HOST_LE;
1050
1051 if (sent->simul)
1052 hdev->host_features[0] |= LMP_HOST_LE_BREDR;
1053 else
1054 hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
1055 }
1056
1057 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1058 !test_bit(HCI_INIT, &hdev->flags))
1059 mgmt_le_enable_complete(hdev, sent->le, status);
1060 }
1061
1062 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1063 struct sk_buff *skb)
1064 {
1065 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1066
1067 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1068 hdev->name, rp->status, rp->phy_handle);
1069
1070 if (rp->status)
1071 return;
1072
1073 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1074 }
1075
1076 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1077 {
1078 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1079
1080 if (status) {
1081 hci_conn_check_pending(hdev);
1082 hci_dev_lock(hdev);
1083 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1084 mgmt_start_discovery_failed(hdev, status);
1085 hci_dev_unlock(hdev);
1086 return;
1087 }
1088
1089 set_bit(HCI_INQUIRY, &hdev->flags);
1090
1091 hci_dev_lock(hdev);
1092 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1093 hci_dev_unlock(hdev);
1094 }
1095
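/* Command Status handler for HCI_Create_Connection: on failure, close and
 * delete the pending connection (a 0x0c "Command Disallowed" status leaves
 * the connection pending for a retry while conn->attempt <= 2); on
 * success, make sure an outgoing ACL hci_conn exists for the target
 * address.
 */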
1096 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1097 {
1098 struct hci_cp_create_conn *cp;
1099 struct hci_conn *conn;
1100
1101 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1102
1103 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1104 if (!cp)
1105 return;
1106
1107 hci_dev_lock(hdev);
1108
1109 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1110
1111 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1112
1113 if (status) {
1114 if (conn && conn->state == BT_CONNECT) {
1115 if (status != 0x0c || conn->attempt > 2) {
1116 conn->state = BT_CLOSED;
1117 hci_proto_connect_cfm(conn, status);
1118 hci_conn_del(conn);
1119 } else
1120 conn->state = BT_CONNECT2;
1121 }
1122 } else {
1123 if (!conn) {
1124 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1125 if (conn) {
1126 conn->out = true;
1127 conn->link_mode |= HCI_LM_MASTER;
1128 } else
1129 BT_ERR("No memory for new connection");
1130 }
1131 }
1132
1133 hci_dev_unlock(hdev);
1134 }
1135
1136 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1137 {
1138 struct hci_cp_add_sco *cp;
1139 struct hci_conn *acl, *sco;
1140 __u16 handle;
1141
1142 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1143
1144 if (!status)
1145 return;
1146
1147 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1148 if (!cp)
1149 return;
1150
1151 handle = __le16_to_cpu(cp->handle);
1152
1153 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1154
1155 hci_dev_lock(hdev);
1156
1157 acl = hci_conn_hash_lookup_handle(hdev, handle);
1158 if (acl) {
1159 sco = acl->link;
1160 if (sco) {
1161 sco->state = BT_CLOSED;
1162
1163 hci_proto_connect_cfm(sco, status);
1164 hci_conn_del(sco);
1165 }
1166 }
1167
1168 hci_dev_unlock(hdev);
1169 }
1170
1171 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1172 {
1173 struct hci_cp_auth_requested *cp;
1174 struct hci_conn *conn;
1175
1176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1177
1178 if (!status)
1179 return;
1180
1181 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1182 if (!cp)
1183 return;
1184
1185 hci_dev_lock(hdev);
1186
1187 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1188 if (conn) {
1189 if (conn->state == BT_CONFIG) {
1190 hci_proto_connect_cfm(conn, status);
1191 hci_conn_put(conn);
1192 }
1193 }
1194
1195 hci_dev_unlock(hdev);
1196 }
1197
1198 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1199 {
1200 struct hci_cp_set_conn_encrypt *cp;
1201 struct hci_conn *conn;
1202
1203 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1204
1205 if (!status)
1206 return;
1207
1208 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1209 if (!cp)
1210 return;
1211
1212 hci_dev_lock(hdev);
1213
1214 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1215 if (conn) {
1216 if (conn->state == BT_CONFIG) {
1217 hci_proto_connect_cfm(conn, status);
1218 hci_conn_put(conn);
1219 }
1220 }
1221
1222 hci_dev_unlock(hdev);
1223 }
1224
1225 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1226 struct hci_conn *conn)
1227 {
1228 if (conn->state != BT_CONFIG || !conn->out)
1229 return 0;
1230
1231 if (conn->pending_sec_level == BT_SECURITY_SDP)
1232 return 0;
1233
1234 /* Only request authentication for SSP connections or non-SSP
1235 * devices with sec_level HIGH or if MITM protection is requested */
1236 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1237 conn->pending_sec_level != BT_SECURITY_HIGH)
1238 return 0;
1239
1240 return 1;
1241 }
1242
1243 static int hci_resolve_name(struct hci_dev *hdev,
1244 struct inquiry_entry *e)
1245 {
1246 struct hci_cp_remote_name_req cp;
1247
1248 memset(&cp, 0, sizeof(cp));
1249
1250 bacpy(&cp.bdaddr, &e->data.bdaddr);
1251 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1252 cp.pscan_mode = e->data.pscan_mode;
1253 cp.clock_offset = e->data.clock_offset;
1254
1255 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1256 }
1257
1258 static bool hci_resolve_next_name(struct hci_dev *hdev)
1259 {
1260 struct discovery_state *discov = &hdev->discovery;
1261 struct inquiry_entry *e;
1262
1263 if (list_empty(&discov->resolve))
1264 return false;
1265
1266 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1267 if (!e)
1268 return false;
1269
1270 if (hci_resolve_name(hdev, e) == 0) {
1271 e->name_state = NAME_PENDING;
1272 return true;
1273 }
1274
1275 return false;
1276 }
1277
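/* Handle completion of a remote name request during discovery: report the
 * device/name to mgmt, update the inquiry cache entry that was pending
 * name resolution and, while in DISCOVERY_RESOLVING, either start the next
 * name request or mark discovery as stopped when nothing is left.
 */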
1278 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1279 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1280 {
1281 struct discovery_state *discov = &hdev->discovery;
1282 struct inquiry_entry *e;
1283
1284 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1285 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1286 name_len, conn->dev_class);
1287
1288 if (discov->state == DISCOVERY_STOPPED)
1289 return;
1290
1291 if (discov->state == DISCOVERY_STOPPING)
1292 goto discov_complete;
1293
1294 if (discov->state != DISCOVERY_RESOLVING)
1295 return;
1296
1297 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1298 /* If the device was not found in the list of devices whose names are
1299 * pending resolution, there is no need to continue resolving the next
1300 * name; that will be done when another Remote Name Request Complete
1301 * event is received. */
1302 if (!e)
1303 return;
1304
1305 list_del(&e->list);
1306 if (name) {
1307 e->name_state = NAME_KNOWN;
1308 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1309 e->data.rssi, name, name_len);
1310 } else {
1311 e->name_state = NAME_NOT_KNOWN;
1312 }
1313
1314 if (hci_resolve_next_name(hdev))
1315 return;
1316
1317 discov_complete:
1318 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1319 }
1320
1321 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1322 {
1323 struct hci_cp_remote_name_req *cp;
1324 struct hci_conn *conn;
1325
1326 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1327
1328 /* If successful, wait for the Remote Name Request Complete event
1329 * before checking whether authentication is needed */
1330 if (!status)
1331 return;
1332
1333 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1334 if (!cp)
1335 return;
1336
1337 hci_dev_lock(hdev);
1338
1339 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1340
1341 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1342 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1343
1344 if (!conn)
1345 goto unlock;
1346
1347 if (!hci_outgoing_auth_needed(hdev, conn))
1348 goto unlock;
1349
1350 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1351 struct hci_cp_auth_requested cp;
1352 cp.handle = __cpu_to_le16(conn->handle);
1353 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1354 }
1355
1356 unlock:
1357 hci_dev_unlock(hdev);
1358 }
1359
1360 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1361 {
1362 struct hci_cp_read_remote_features *cp;
1363 struct hci_conn *conn;
1364
1365 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1366
1367 if (!status)
1368 return;
1369
1370 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1371 if (!cp)
1372 return;
1373
1374 hci_dev_lock(hdev);
1375
1376 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1377 if (conn) {
1378 if (conn->state == BT_CONFIG) {
1379 hci_proto_connect_cfm(conn, status);
1380 hci_conn_put(conn);
1381 }
1382 }
1383
1384 hci_dev_unlock(hdev);
1385 }
1386
1387 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1388 {
1389 struct hci_cp_read_remote_ext_features *cp;
1390 struct hci_conn *conn;
1391
1392 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1393
1394 if (!status)
1395 return;
1396
1397 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1398 if (!cp)
1399 return;
1400
1401 hci_dev_lock(hdev);
1402
1403 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1404 if (conn) {
1405 if (conn->state == BT_CONFIG) {
1406 hci_proto_connect_cfm(conn, status);
1407 hci_conn_put(conn);
1408 }
1409 }
1410
1411 hci_dev_unlock(hdev);
1412 }
1413
1414 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1415 {
1416 struct hci_cp_setup_sync_conn *cp;
1417 struct hci_conn *acl, *sco;
1418 __u16 handle;
1419
1420 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1421
1422 if (!status)
1423 return;
1424
1425 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1426 if (!cp)
1427 return;
1428
1429 handle = __le16_to_cpu(cp->handle);
1430
1431 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1432
1433 hci_dev_lock(hdev);
1434
1435 acl = hci_conn_hash_lookup_handle(hdev, handle);
1436 if (acl) {
1437 sco = acl->link;
1438 if (sco) {
1439 sco->state = BT_CLOSED;
1440
1441 hci_proto_connect_cfm(sco, status);
1442 hci_conn_del(sco);
1443 }
1444 }
1445
1446 hci_dev_unlock(hdev);
1447 }
1448
1449 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1450 {
1451 struct hci_cp_sniff_mode *cp;
1452 struct hci_conn *conn;
1453
1454 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1455
1456 if (!status)
1457 return;
1458
1459 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1460 if (!cp)
1461 return;
1462
1463 hci_dev_lock(hdev);
1464
1465 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1466 if (conn) {
1467 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1468
1469 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1470 hci_sco_setup(conn, status);
1471 }
1472
1473 hci_dev_unlock(hdev);
1474 }
1475
1476 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1477 {
1478 struct hci_cp_exit_sniff_mode *cp;
1479 struct hci_conn *conn;
1480
1481 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1482
1483 if (!status)
1484 return;
1485
1486 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1487 if (!cp)
1488 return;
1489
1490 hci_dev_lock(hdev);
1491
1492 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1493 if (conn) {
1494 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1495
1496 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1497 hci_sco_setup(conn, status);
1498 }
1499
1500 hci_dev_unlock(hdev);
1501 }
1502
1503 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1504 {
1505 struct hci_cp_disconnect *cp;
1506 struct hci_conn *conn;
1507
1508 if (!status)
1509 return;
1510
1511 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1512 if (!cp)
1513 return;
1514
1515 hci_dev_lock(hdev);
1516
1517 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1518 if (conn)
1519 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1520 conn->dst_type, status);
1521
1522 hci_dev_unlock(hdev);
1523 }
1524
1525 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1526 {
1527 struct hci_conn *conn;
1528
1529 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1530
1531 if (status) {
1532 hci_dev_lock(hdev);
1533
1534 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1535 if (!conn) {
1536 hci_dev_unlock(hdev);
1537 return;
1538 }
1539
1540 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1541
1542 conn->state = BT_CLOSED;
1543 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1544 conn->dst_type, status);
1545 hci_proto_connect_cfm(conn, status);
1546 hci_conn_del(conn);
1547
1548 hci_dev_unlock(hdev);
1549 }
1550 }
1551
1552 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1553 {
1554 struct hci_cp_create_phy_link *cp;
1555
1556 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1557
1558 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1559 if (!cp)
1560 return;
1561
1562 hci_dev_lock(hdev);
1563
1564 if (status) {
1565 struct hci_conn *hcon;
1566
1567 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1568 if (hcon)
1569 hci_conn_del(hcon);
1570 } else {
1571 amp_write_remote_assoc(hdev, cp->phy_handle);
1572 }
1573
1574 hci_dev_unlock(hdev);
1575 }
1576
1577 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1578 {
1579 struct hci_cp_accept_phy_link *cp;
1580
1581 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1582
1583 if (status)
1584 return;
1585
1586 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1587 if (!cp)
1588 return;
1589
1590 amp_write_remote_assoc(hdev, cp->phy_handle);
1591 }
1592
1593 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1594 {
1595 __u8 status = *((__u8 *) skb->data);
1596 struct discovery_state *discov = &hdev->discovery;
1597 struct inquiry_entry *e;
1598
1599 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1600
1601 hci_conn_check_pending(hdev);
1602
1603 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1604 return;
1605
1606 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1607 return;
1608
1609 hci_dev_lock(hdev);
1610
1611 if (discov->state != DISCOVERY_FINDING)
1612 goto unlock;
1613
1614 if (list_empty(&discov->resolve)) {
1615 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1616 goto unlock;
1617 }
1618
1619 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1620 if (e && hci_resolve_name(hdev, e) == 0) {
1621 e->name_state = NAME_PENDING;
1622 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1623 } else {
1624 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1625 }
1626
1627 unlock:
1628 hci_dev_unlock(hdev);
1629 }
1630
1631 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1632 {
1633 struct inquiry_data data;
1634 struct inquiry_info *info = (void *) (skb->data + 1);
1635 int num_rsp = *((__u8 *) skb->data);
1636
1637 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1638
1639 if (!num_rsp)
1640 return;
1641
1642 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1643 return;
1644
1645 hci_dev_lock(hdev);
1646
1647 for (; num_rsp; num_rsp--, info++) {
1648 bool name_known, ssp;
1649
1650 bacpy(&data.bdaddr, &info->bdaddr);
1651 data.pscan_rep_mode = info->pscan_rep_mode;
1652 data.pscan_period_mode = info->pscan_period_mode;
1653 data.pscan_mode = info->pscan_mode;
1654 memcpy(data.dev_class, info->dev_class, 3);
1655 data.clock_offset = info->clock_offset;
1656 data.rssi = 0x00;
1657 data.ssp_mode = 0x00;
1658
1659 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1660 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1661 info->dev_class, 0, !name_known, ssp, NULL,
1662 0);
1663 }
1664
1665 hci_dev_unlock(hdev);
1666 }
1667
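/* Handle an HCI Connection Complete event: bind the handle to the matching
 * hci_conn (falling back to a pending eSCO entry for SCO links), move ACL
 * links to BT_CONFIG and start remote feature discovery, and clean the
 * connection up when the event carries an error status.
 */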
1668 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1669 {
1670 struct hci_ev_conn_complete *ev = (void *) skb->data;
1671 struct hci_conn *conn;
1672
1673 BT_DBG("%s", hdev->name);
1674
1675 hci_dev_lock(hdev);
1676
1677 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1678 if (!conn) {
1679 if (ev->link_type != SCO_LINK)
1680 goto unlock;
1681
1682 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1683 if (!conn)
1684 goto unlock;
1685
1686 conn->type = SCO_LINK;
1687 }
1688
1689 if (!ev->status) {
1690 conn->handle = __le16_to_cpu(ev->handle);
1691
1692 if (conn->type == ACL_LINK) {
1693 conn->state = BT_CONFIG;
1694 hci_conn_hold(conn);
1695
1696 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1697 !hci_find_link_key(hdev, &ev->bdaddr))
1698 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1699 else
1700 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1701 } else
1702 conn->state = BT_CONNECTED;
1703
1704 hci_conn_hold_device(conn);
1705 hci_conn_add_sysfs(conn);
1706
1707 if (test_bit(HCI_AUTH, &hdev->flags))
1708 conn->link_mode |= HCI_LM_AUTH;
1709
1710 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1711 conn->link_mode |= HCI_LM_ENCRYPT;
1712
1713 /* Get remote features */
1714 if (conn->type == ACL_LINK) {
1715 struct hci_cp_read_remote_features cp;
1716 cp.handle = ev->handle;
1717 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1718 sizeof(cp), &cp);
1719 }
1720
1721 /* Set packet type for incoming connection */
1722 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1723 struct hci_cp_change_conn_ptype cp;
1724 cp.handle = ev->handle;
1725 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1726 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1727 &cp);
1728 }
1729 } else {
1730 conn->state = BT_CLOSED;
1731 if (conn->type == ACL_LINK)
1732 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1733 conn->dst_type, ev->status);
1734 }
1735
1736 if (conn->type == ACL_LINK)
1737 hci_sco_setup(conn, ev->status);
1738
1739 if (ev->status) {
1740 hci_proto_connect_cfm(conn, ev->status);
1741 hci_conn_del(conn);
1742 } else if (ev->link_type != ACL_LINK)
1743 hci_proto_connect_cfm(conn, ev->status);
1744
1745 unlock:
1746 hci_dev_unlock(hdev);
1747
1748 hci_conn_check_pending(hdev);
1749 }
1750
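/* Accept a connection request that was previously deferred: send Accept
 * Connection Request (choosing the master or slave role) or, on eSCO
 * capable controllers, Accept Synchronous Connection Request with default
 * bandwidth and latency parameters.
 */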
1751 void hci_conn_accept(struct hci_conn *conn, int mask)
1752 {
1753 struct hci_dev *hdev = conn->hdev;
1754
1755 BT_DBG("conn %p", conn);
1756
1757 conn->state = BT_CONFIG;
1758
1759 if (!lmp_esco_capable(hdev)) {
1760 struct hci_cp_accept_conn_req cp;
1761
1762 bacpy(&cp.bdaddr, &conn->dst);
1763
1764 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1765 cp.role = 0x00; /* Become master */
1766 else
1767 cp.role = 0x01; /* Remain slave */
1768
1769 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
1770 } else /* lmp_esco_capable(hdev) */ {
1771 struct hci_cp_accept_sync_conn_req cp;
1772
1773 bacpy(&cp.bdaddr, &conn->dst);
1774 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1775
1776 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1777 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1778 cp.max_latency = __constant_cpu_to_le16(0xffff);
1779 cp.content_format = cpu_to_le16(hdev->voice_setting);
1780 cp.retrans_effort = 0xff;
1781
1782 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1783 sizeof(cp), &cp);
1784 }
1785 }
1786
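/* Handle an incoming HCI Connection Request: if the link policy and the
 * protocol layer accept it and the peer is not blacklisted, accept the ACL
 * or synchronous connection (or leave it in BT_CONNECT2 when the protocol
 * asks to defer); otherwise reject the request.
 */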
1787 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1788 {
1789 struct hci_ev_conn_request *ev = (void *) skb->data;
1790 int mask = hdev->link_mode;
1791 __u8 flags = 0;
1792
1793 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1794 ev->link_type);
1795
1796 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1797 &flags);
1798
1799 if ((mask & HCI_LM_ACCEPT) &&
1800 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1801 /* Connection accepted */
1802 struct inquiry_entry *ie;
1803 struct hci_conn *conn;
1804
1805 hci_dev_lock(hdev);
1806
1807 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1808 if (ie)
1809 memcpy(ie->data.dev_class, ev->dev_class, 3);
1810
1811 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1812 &ev->bdaddr);
1813 if (!conn) {
1814 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1815 if (!conn) {
1816 BT_ERR("No memory for new connection");
1817 hci_dev_unlock(hdev);
1818 return;
1819 }
1820 }
1821
1822 memcpy(conn->dev_class, ev->dev_class, 3);
1823
1824 hci_dev_unlock(hdev);
1825
1826 if (ev->link_type == ACL_LINK ||
1827 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1828 struct hci_cp_accept_conn_req cp;
1829 conn->state = BT_CONNECT;
1830
1831 bacpy(&cp.bdaddr, &ev->bdaddr);
1832
1833 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1834 cp.role = 0x00; /* Become master */
1835 else
1836 cp.role = 0x01; /* Remain slave */
1837
1838 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1839 &cp);
1840 } else if (!(flags & HCI_PROTO_DEFER)) {
1841 struct hci_cp_accept_sync_conn_req cp;
1842 conn->state = BT_CONNECT;
1843
1844 bacpy(&cp.bdaddr, &ev->bdaddr);
1845 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1846
1847 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1848 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1849 cp.max_latency = __constant_cpu_to_le16(0xffff);
1850 cp.content_format = cpu_to_le16(hdev->voice_setting);
1851 cp.retrans_effort = 0xff;
1852
1853 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1854 sizeof(cp), &cp);
1855 } else {
1856 conn->state = BT_CONNECT2;
1857 hci_proto_connect_cfm(conn, 0);
1858 hci_conn_put(conn);
1859 }
1860 } else {
1861 /* Connection rejected */
1862 struct hci_cp_reject_conn_req cp;
1863
1864 bacpy(&cp.bdaddr, &ev->bdaddr);
1865 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1866 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1867 }
1868 }
1869
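/* Map an HCI disconnect reason code to the MGMT_DEV_DISCONN_* value
 * reported to userspace.
 */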
1870 static u8 hci_to_mgmt_reason(u8 err)
1871 {
1872 switch (err) {
1873 case HCI_ERROR_CONNECTION_TIMEOUT:
1874 return MGMT_DEV_DISCONN_TIMEOUT;
1875 case HCI_ERROR_REMOTE_USER_TERM:
1876 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1877 case HCI_ERROR_REMOTE_POWER_OFF:
1878 return MGMT_DEV_DISCONN_REMOTE;
1879 case HCI_ERROR_LOCAL_HOST_TERM:
1880 return MGMT_DEV_DISCONN_LOCAL_HOST;
1881 default:
1882 return MGMT_DEV_DISCONN_UNKNOWN;
1883 }
1884 }
1885
1886 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1887 {
1888 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1889 struct hci_conn *conn;
1890
1891 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1892
1893 hci_dev_lock(hdev);
1894
1895 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1896 if (!conn)
1897 goto unlock;
1898
1899 if (ev->status == 0)
1900 conn->state = BT_CLOSED;
1901
1902 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1903 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1904 if (ev->status) {
1905 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1906 conn->dst_type, ev->status);
1907 } else {
1908 u8 reason = hci_to_mgmt_reason(ev->reason);
1909
1910 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1911 conn->dst_type, reason);
1912 }
1913 }
1914
1915 if (ev->status == 0) {
1916 if (conn->type == ACL_LINK && conn->flush_key)
1917 hci_remove_link_key(hdev, &conn->dst);
1918 hci_proto_disconn_cfm(conn, ev->reason);
1919 hci_conn_del(conn);
1920 }
1921
1922 unlock:
1923 hci_dev_unlock(hdev);
1924 }
1925
1926 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1927 {
1928 struct hci_ev_auth_complete *ev = (void *) skb->data;
1929 struct hci_conn *conn;
1930
1931 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1932
1933 hci_dev_lock(hdev);
1934
1935 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1936 if (!conn)
1937 goto unlock;
1938
1939 if (!ev->status) {
1940 if (!hci_conn_ssp_enabled(conn) &&
1941 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1942 BT_INFO("re-auth of legacy device is not possible.");
1943 } else {
1944 conn->link_mode |= HCI_LM_AUTH;
1945 conn->sec_level = conn->pending_sec_level;
1946 }
1947 } else {
1948 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1949 ev->status);
1950 }
1951
1952 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1953 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1954
1955 if (conn->state == BT_CONFIG) {
1956 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1957 struct hci_cp_set_conn_encrypt cp;
1958 cp.handle = ev->handle;
1959 cp.encrypt = 0x01;
1960 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1961 &cp);
1962 } else {
1963 conn->state = BT_CONNECTED;
1964 hci_proto_connect_cfm(conn, ev->status);
1965 hci_conn_put(conn);
1966 }
1967 } else {
1968 hci_auth_cfm(conn, ev->status);
1969
1970 hci_conn_hold(conn);
1971 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1972 hci_conn_put(conn);
1973 }
1974
1975 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1976 if (!ev->status) {
1977 struct hci_cp_set_conn_encrypt cp;
1978 cp.handle = ev->handle;
1979 cp.encrypt = 0x01;
1980 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1981 &cp);
1982 } else {
1983 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1984 hci_encrypt_cfm(conn, ev->status, 0x00);
1985 }
1986 }
1987
1988 unlock:
1989 hci_dev_unlock(hdev);
1990 }
1991
1992 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1993 {
1994 struct hci_ev_remote_name *ev = (void *) skb->data;
1995 struct hci_conn *conn;
1996
1997 BT_DBG("%s", hdev->name);
1998
1999 hci_conn_check_pending(hdev);
2000
2001 hci_dev_lock(hdev);
2002
2003 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2004
2005 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2006 goto check_auth;
2007
2008 if (ev->status == 0)
2009 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2010 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2011 else
2012 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2013
2014 check_auth:
2015 if (!conn)
2016 goto unlock;
2017
2018 if (!hci_outgoing_auth_needed(hdev, conn))
2019 goto unlock;
2020
2021 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2022 struct hci_cp_auth_requested cp;
2023 cp.handle = __cpu_to_le16(conn->handle);
2024 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2025 }
2026
2027 unlock:
2028 hci_dev_unlock(hdev);
2029 }
2030
2031 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2032 {
2033 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2034 struct hci_conn *conn;
2035
2036 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2037
2038 hci_dev_lock(hdev);
2039
2040 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2041 if (conn) {
2042 if (!ev->status) {
2043 if (ev->encrypt) {
2044 /* Encryption implies authentication */
2045 conn->link_mode |= HCI_LM_AUTH;
2046 conn->link_mode |= HCI_LM_ENCRYPT;
2047 conn->sec_level = conn->pending_sec_level;
2048 } else
2049 conn->link_mode &= ~HCI_LM_ENCRYPT;
2050 }
2051
2052 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2053
2054 if (ev->status && conn->state == BT_CONNECTED) {
2055 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2056 hci_conn_put(conn);
2057 goto unlock;
2058 }
2059
2060 if (conn->state == BT_CONFIG) {
2061 if (!ev->status)
2062 conn->state = BT_CONNECTED;
2063
2064 hci_proto_connect_cfm(conn, ev->status);
2065 hci_conn_put(conn);
2066 } else
2067 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2068 }
2069
2070 unlock:
2071 hci_dev_unlock(hdev);
2072 }
2073
2074 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2075 struct sk_buff *skb)
2076 {
2077 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2078 struct hci_conn *conn;
2079
2080 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2081
2082 hci_dev_lock(hdev);
2083
2084 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2085 if (conn) {
2086 if (!ev->status)
2087 conn->link_mode |= HCI_LM_SECURE;
2088
2089 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2090
2091 hci_key_change_cfm(conn, ev->status);
2092 }
2093
2094 hci_dev_unlock(hdev);
2095 }
2096
2097 static void hci_remote_features_evt(struct hci_dev *hdev,
2098 struct sk_buff *skb)
2099 {
2100 struct hci_ev_remote_features *ev = (void *) skb->data;
2101 struct hci_conn *conn;
2102
2103 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2104
2105 hci_dev_lock(hdev);
2106
2107 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2108 if (!conn)
2109 goto unlock;
2110
2111 if (!ev->status)
2112 memcpy(conn->features, ev->features, 8);
2113
2114 if (conn->state != BT_CONFIG)
2115 goto unlock;
2116
2117 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2118 struct hci_cp_read_remote_ext_features cp;
2119 cp.handle = ev->handle;
2120 cp.page = 0x01;
2121 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2122 sizeof(cp), &cp);
2123 goto unlock;
2124 }
2125
2126 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2127 struct hci_cp_remote_name_req cp;
2128 memset(&cp, 0, sizeof(cp));
2129 bacpy(&cp.bdaddr, &conn->dst);
2130 cp.pscan_rep_mode = 0x02;
2131 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2132 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2133 mgmt_device_connected(hdev, &conn->dst, conn->type,
2134 conn->dst_type, 0, NULL, 0,
2135 conn->dev_class);
2136
2137 if (!hci_outgoing_auth_needed(hdev, conn)) {
2138 conn->state = BT_CONNECTED;
2139 hci_proto_connect_cfm(conn, ev->status);
2140 hci_conn_put(conn);
2141 }
2142
2143 unlock:
2144 hci_dev_unlock(hdev);
2145 }
2146
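/* Command Complete event: dispatch the return parameters to the matching
 * hci_cc_* handler, stop the command timer, complete any pending HCI request
 * with the status byte and restart command queue processing when the
 * controller can accept further commands.
 */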
2147 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2148 {
2149 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2150 u8 status = skb->data[sizeof(*ev)];
2151 __u16 opcode;
2152
2153 skb_pull(skb, sizeof(*ev));
2154
2155 opcode = __le16_to_cpu(ev->opcode);
2156
2157 switch (opcode) {
2158 case HCI_OP_INQUIRY_CANCEL:
2159 hci_cc_inquiry_cancel(hdev, skb);
2160 break;
2161
2162 case HCI_OP_PERIODIC_INQ:
2163 hci_cc_periodic_inq(hdev, skb);
2164 break;
2165
2166 case HCI_OP_EXIT_PERIODIC_INQ:
2167 hci_cc_exit_periodic_inq(hdev, skb);
2168 break;
2169
2170 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2171 hci_cc_remote_name_req_cancel(hdev, skb);
2172 break;
2173
2174 case HCI_OP_ROLE_DISCOVERY:
2175 hci_cc_role_discovery(hdev, skb);
2176 break;
2177
2178 case HCI_OP_READ_LINK_POLICY:
2179 hci_cc_read_link_policy(hdev, skb);
2180 break;
2181
2182 case HCI_OP_WRITE_LINK_POLICY:
2183 hci_cc_write_link_policy(hdev, skb);
2184 break;
2185
2186 case HCI_OP_READ_DEF_LINK_POLICY:
2187 hci_cc_read_def_link_policy(hdev, skb);
2188 break;
2189
2190 case HCI_OP_WRITE_DEF_LINK_POLICY:
2191 hci_cc_write_def_link_policy(hdev, skb);
2192 break;
2193
2194 case HCI_OP_RESET:
2195 hci_cc_reset(hdev, skb);
2196 break;
2197
2198 case HCI_OP_WRITE_LOCAL_NAME:
2199 hci_cc_write_local_name(hdev, skb);
2200 break;
2201
2202 case HCI_OP_READ_LOCAL_NAME:
2203 hci_cc_read_local_name(hdev, skb);
2204 break;
2205
2206 case HCI_OP_WRITE_AUTH_ENABLE:
2207 hci_cc_write_auth_enable(hdev, skb);
2208 break;
2209
2210 case HCI_OP_WRITE_ENCRYPT_MODE:
2211 hci_cc_write_encrypt_mode(hdev, skb);
2212 break;
2213
2214 case HCI_OP_WRITE_SCAN_ENABLE:
2215 hci_cc_write_scan_enable(hdev, skb);
2216 break;
2217
2218 case HCI_OP_READ_CLASS_OF_DEV:
2219 hci_cc_read_class_of_dev(hdev, skb);
2220 break;
2221
2222 case HCI_OP_WRITE_CLASS_OF_DEV:
2223 hci_cc_write_class_of_dev(hdev, skb);
2224 break;
2225
2226 case HCI_OP_READ_VOICE_SETTING:
2227 hci_cc_read_voice_setting(hdev, skb);
2228 break;
2229
2230 case HCI_OP_WRITE_VOICE_SETTING:
2231 hci_cc_write_voice_setting(hdev, skb);
2232 break;
2233
2234 case HCI_OP_WRITE_SSP_MODE:
2235 hci_cc_write_ssp_mode(hdev, skb);
2236 break;
2237
2238 case HCI_OP_READ_LOCAL_VERSION:
2239 hci_cc_read_local_version(hdev, skb);
2240 break;
2241
2242 case HCI_OP_READ_LOCAL_COMMANDS:
2243 hci_cc_read_local_commands(hdev, skb);
2244 break;
2245
2246 case HCI_OP_READ_LOCAL_FEATURES:
2247 hci_cc_read_local_features(hdev, skb);
2248 break;
2249
2250 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2251 hci_cc_read_local_ext_features(hdev, skb);
2252 break;
2253
2254 case HCI_OP_READ_BUFFER_SIZE:
2255 hci_cc_read_buffer_size(hdev, skb);
2256 break;
2257
2258 case HCI_OP_READ_BD_ADDR:
2259 hci_cc_read_bd_addr(hdev, skb);
2260 break;
2261
2262 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2263 hci_cc_read_page_scan_activity(hdev, skb);
2264 break;
2265
2266 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2267 hci_cc_write_page_scan_activity(hdev, skb);
2268 break;
2269
2270 case HCI_OP_READ_PAGE_SCAN_TYPE:
2271 hci_cc_read_page_scan_type(hdev, skb);
2272 break;
2273
2274 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2275 hci_cc_write_page_scan_type(hdev, skb);
2276 break;
2277
2278 case HCI_OP_READ_DATA_BLOCK_SIZE:
2279 hci_cc_read_data_block_size(hdev, skb);
2280 break;
2281
2282 case HCI_OP_READ_FLOW_CONTROL_MODE:
2283 hci_cc_read_flow_control_mode(hdev, skb);
2284 break;
2285
2286 case HCI_OP_READ_LOCAL_AMP_INFO:
2287 hci_cc_read_local_amp_info(hdev, skb);
2288 break;
2289
2290 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2291 hci_cc_read_local_amp_assoc(hdev, skb);
2292 break;
2293
2294 case HCI_OP_READ_INQ_RSP_TX_POWER:
2295 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2296 break;
2297
2298 case HCI_OP_PIN_CODE_REPLY:
2299 hci_cc_pin_code_reply(hdev, skb);
2300 break;
2301
2302 case HCI_OP_PIN_CODE_NEG_REPLY:
2303 hci_cc_pin_code_neg_reply(hdev, skb);
2304 break;
2305
2306 case HCI_OP_READ_LOCAL_OOB_DATA:
2307 hci_cc_read_local_oob_data_reply(hdev, skb);
2308 break;
2309
2310 case HCI_OP_LE_READ_BUFFER_SIZE:
2311 hci_cc_le_read_buffer_size(hdev, skb);
2312 break;
2313
2314 case HCI_OP_LE_READ_LOCAL_FEATURES:
2315 hci_cc_le_read_local_features(hdev, skb);
2316 break;
2317
2318 case HCI_OP_LE_READ_ADV_TX_POWER:
2319 hci_cc_le_read_adv_tx_power(hdev, skb);
2320 break;
2321
2322 case HCI_OP_USER_CONFIRM_REPLY:
2323 hci_cc_user_confirm_reply(hdev, skb);
2324 break;
2325
2326 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2327 hci_cc_user_confirm_neg_reply(hdev, skb);
2328 break;
2329
2330 case HCI_OP_USER_PASSKEY_REPLY:
2331 hci_cc_user_passkey_reply(hdev, skb);
2332 break;
2333
2334 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2335 hci_cc_user_passkey_neg_reply(hdev, skb);
2336 break;
2337
2338 case HCI_OP_LE_SET_SCAN_PARAM:
2339 hci_cc_le_set_scan_param(hdev, skb);
2340 break;
2341
2342 case HCI_OP_LE_SET_ADV_ENABLE:
2343 hci_cc_le_set_adv_enable(hdev, skb);
2344 break;
2345
2346 case HCI_OP_LE_SET_SCAN_ENABLE:
2347 hci_cc_le_set_scan_enable(hdev, skb);
2348 break;
2349
2350 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2351 hci_cc_le_read_white_list_size(hdev, skb);
2352 break;
2353
2354 case HCI_OP_LE_READ_SUPPORTED_STATES:
2355 hci_cc_le_read_supported_states(hdev, skb);
2356 break;
2357
2358 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2359 hci_cc_write_le_host_supported(hdev, skb);
2360 break;
2361
2362 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2363 hci_cc_write_remote_amp_assoc(hdev, skb);
2364 break;
2365
2366 default:
2367 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2368 break;
2369 }
2370
2371 if (opcode != HCI_OP_NOP)
2372 del_timer(&hdev->cmd_timer);
2373
2374 hci_req_cmd_complete(hdev, opcode, status);
2375
2376 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2377 atomic_set(&hdev->cmd_cnt, 1);
2378 if (!skb_queue_empty(&hdev->cmd_q))
2379 queue_work(hdev->workqueue, &hdev->cmd_work);
2380 }
2381 }
2382
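/* Command Status event: forward the status to the matching hci_cs_* handler,
 * stop the command timer, complete any pending HCI request and kick the
 * command queue again if the controller has free command slots.
 */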
2383 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2384 {
2385 struct hci_ev_cmd_status *ev = (void *) skb->data;
2386 __u16 opcode;
2387
2388 skb_pull(skb, sizeof(*ev));
2389
2390 opcode = __le16_to_cpu(ev->opcode);
2391
2392 switch (opcode) {
2393 case HCI_OP_INQUIRY:
2394 hci_cs_inquiry(hdev, ev->status);
2395 break;
2396
2397 case HCI_OP_CREATE_CONN:
2398 hci_cs_create_conn(hdev, ev->status);
2399 break;
2400
2401 case HCI_OP_ADD_SCO:
2402 hci_cs_add_sco(hdev, ev->status);
2403 break;
2404
2405 case HCI_OP_AUTH_REQUESTED:
2406 hci_cs_auth_requested(hdev, ev->status);
2407 break;
2408
2409 case HCI_OP_SET_CONN_ENCRYPT:
2410 hci_cs_set_conn_encrypt(hdev, ev->status);
2411 break;
2412
2413 case HCI_OP_REMOTE_NAME_REQ:
2414 hci_cs_remote_name_req(hdev, ev->status);
2415 break;
2416
2417 case HCI_OP_READ_REMOTE_FEATURES:
2418 hci_cs_read_remote_features(hdev, ev->status);
2419 break;
2420
2421 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2422 hci_cs_read_remote_ext_features(hdev, ev->status);
2423 break;
2424
2425 case HCI_OP_SETUP_SYNC_CONN:
2426 hci_cs_setup_sync_conn(hdev, ev->status);
2427 break;
2428
2429 case HCI_OP_SNIFF_MODE:
2430 hci_cs_sniff_mode(hdev, ev->status);
2431 break;
2432
2433 case HCI_OP_EXIT_SNIFF_MODE:
2434 hci_cs_exit_sniff_mode(hdev, ev->status);
2435 break;
2436
2437 case HCI_OP_DISCONNECT:
2438 hci_cs_disconnect(hdev, ev->status);
2439 break;
2440
2441 case HCI_OP_LE_CREATE_CONN:
2442 hci_cs_le_create_conn(hdev, ev->status);
2443 break;
2444
2445 case HCI_OP_CREATE_PHY_LINK:
2446 hci_cs_create_phylink(hdev, ev->status);
2447 break;
2448
2449 case HCI_OP_ACCEPT_PHY_LINK:
2450 hci_cs_accept_phylink(hdev, ev->status);
2451 break;
2452
2453 default:
2454 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2455 break;
2456 }
2457
2458 if (opcode != HCI_OP_NOP)
2459 del_timer(&hdev->cmd_timer);
2460
2461 hci_req_cmd_complete(hdev, opcode, ev->status);
2462
2463 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2464 atomic_set(&hdev->cmd_cnt, 1);
2465 if (!skb_queue_empty(&hdev->cmd_q))
2466 queue_work(hdev->workqueue, &hdev->cmd_work);
2467 }
2468 }
2469
2470 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2471 {
2472 struct hci_ev_role_change *ev = (void *) skb->data;
2473 struct hci_conn *conn;
2474
2475 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2476
2477 hci_dev_lock(hdev);
2478
2479 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2480 if (conn) {
2481 if (!ev->status) {
2482 if (ev->role)
2483 conn->link_mode &= ~HCI_LM_MASTER;
2484 else
2485 conn->link_mode |= HCI_LM_MASTER;
2486 }
2487
2488 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2489
2490 hci_role_switch_cfm(conn, ev->status, ev->role);
2491 }
2492
2493 hci_dev_unlock(hdev);
2494 }
2495
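/* Number of Completed Packets event (packet-based flow control): return
 * transmit credits to the per-connection counters and the ACL/SCO/LE quotas,
 * then reschedule the TX work to push out more queued data.
 */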
2496 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2497 {
2498 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2499 int i;
2500
2501 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2502 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2503 return;
2504 }
2505
2506 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2507 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2508 BT_DBG("%s bad parameters", hdev->name);
2509 return;
2510 }
2511
2512 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2513
2514 for (i = 0; i < ev->num_hndl; i++) {
2515 struct hci_comp_pkts_info *info = &ev->handles[i];
2516 struct hci_conn *conn;
2517 __u16 handle, count;
2518
2519 handle = __le16_to_cpu(info->handle);
2520 count = __le16_to_cpu(info->count);
2521
2522 conn = hci_conn_hash_lookup_handle(hdev, handle);
2523 if (!conn)
2524 continue;
2525
2526 conn->sent -= count;
2527
2528 switch (conn->type) {
2529 case ACL_LINK:
2530 hdev->acl_cnt += count;
2531 if (hdev->acl_cnt > hdev->acl_pkts)
2532 hdev->acl_cnt = hdev->acl_pkts;
2533 break;
2534
2535 case LE_LINK:
2536 if (hdev->le_pkts) {
2537 hdev->le_cnt += count;
2538 if (hdev->le_cnt > hdev->le_pkts)
2539 hdev->le_cnt = hdev->le_pkts;
2540 } else {
2541 hdev->acl_cnt += count;
2542 if (hdev->acl_cnt > hdev->acl_pkts)
2543 hdev->acl_cnt = hdev->acl_pkts;
2544 }
2545 break;
2546
2547 case SCO_LINK:
2548 hdev->sco_cnt += count;
2549 if (hdev->sco_cnt > hdev->sco_pkts)
2550 hdev->sco_cnt = hdev->sco_pkts;
2551 break;
2552
2553 default:
2554 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2555 break;
2556 }
2557 }
2558
2559 queue_work(hdev->workqueue, &hdev->tx_work);
2560 }
2561
2562 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2563 __u16 handle)
2564 {
2565 struct hci_chan *chan;
2566
2567 switch (hdev->dev_type) {
2568 case HCI_BREDR:
2569 return hci_conn_hash_lookup_handle(hdev, handle);
2570 case HCI_AMP:
2571 chan = hci_chan_lookup_handle(hdev, handle);
2572 if (chan)
2573 return chan->conn;
2574 break;
2575 default:
2576 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2577 break;
2578 }
2579
2580 return NULL;
2581 }
2582
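/* Number of Completed Data Blocks event (block-based flow control): return
 * block credits for the affected ACL/AMP links and reschedule the TX work.
 */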
2583 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2584 {
2585 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2586 int i;
2587
2588 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2589 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2590 return;
2591 }
2592
2593 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2594 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2595 BT_DBG("%s bad parameters", hdev->name);
2596 return;
2597 }
2598
2599 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2600 ev->num_hndl);
2601
2602 for (i = 0; i < ev->num_hndl; i++) {
2603 struct hci_comp_blocks_info *info = &ev->handles[i];
2604 struct hci_conn *conn = NULL;
2605 __u16 handle, block_count;
2606
2607 handle = __le16_to_cpu(info->handle);
2608 block_count = __le16_to_cpu(info->blocks);
2609
2610 conn = __hci_conn_lookup_handle(hdev, handle);
2611 if (!conn)
2612 continue;
2613
2614 conn->sent -= block_count;
2615
2616 switch (conn->type) {
2617 case ACL_LINK:
2618 case AMP_LINK:
2619 hdev->block_cnt += block_count;
2620 if (hdev->block_cnt > hdev->num_blocks)
2621 hdev->block_cnt = hdev->num_blocks;
2622 break;
2623
2624 default:
2625 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2626 break;
2627 }
2628 }
2629
2630 queue_work(hdev->workqueue, &hdev->tx_work);
2631 }
2632
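/* Mode Change event: record the connection's new mode and interval, keep the
 * power-save flag in sync for mode changes we did not request and finish any
 * SCO setup that was waiting for the mode change.
 */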
2633 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2634 {
2635 struct hci_ev_mode_change *ev = (void *) skb->data;
2636 struct hci_conn *conn;
2637
2638 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2639
2640 hci_dev_lock(hdev);
2641
2642 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2643 if (conn) {
2644 conn->mode = ev->mode;
2645 conn->interval = __le16_to_cpu(ev->interval);
2646
2647 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2648 &conn->flags)) {
2649 if (conn->mode == HCI_CM_ACTIVE)
2650 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2651 else
2652 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2653 }
2654
2655 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2656 hci_sco_setup(conn, ev->status);
2657 }
2658
2659 hci_dev_unlock(hdev);
2660 }
2661
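/* PIN Code Request event: extend the disconnect timeout of an established
 * link, auto-reject the request when the device is not pairable and
 * otherwise forward it to user space via mgmt, hinting whether a secure PIN
 * is required.
 */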
2662 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2663 {
2664 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2665 struct hci_conn *conn;
2666
2667 BT_DBG("%s", hdev->name);
2668
2669 hci_dev_lock(hdev);
2670
2671 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2672 if (!conn)
2673 goto unlock;
2674
2675 if (conn->state == BT_CONNECTED) {
2676 hci_conn_hold(conn);
2677 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2678 hci_conn_put(conn);
2679 }
2680
2681 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2682 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2683 sizeof(ev->bdaddr), &ev->bdaddr);
2684 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2685 u8 secure;
2686
2687 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2688 secure = 1;
2689 else
2690 secure = 0;
2691
2692 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2693 }
2694
2695 unlock:
2696 hci_dev_unlock(hdev);
2697 }
2698
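/* Link Key Request event: when link keys are managed by the kernel, look up
 * a stored key for the peer and reply with it; debug keys and keys that are
 * too weak for the requested security level are answered with a negative
 * reply instead.
 */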
2699 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2700 {
2701 struct hci_ev_link_key_req *ev = (void *) skb->data;
2702 struct hci_cp_link_key_reply cp;
2703 struct hci_conn *conn;
2704 struct link_key *key;
2705
2706 BT_DBG("%s", hdev->name);
2707
2708 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2709 return;
2710
2711 hci_dev_lock(hdev);
2712
2713 key = hci_find_link_key(hdev, &ev->bdaddr);
2714 if (!key) {
2715 BT_DBG("%s link key not found for %pMR", hdev->name,
2716 &ev->bdaddr);
2717 goto not_found;
2718 }
2719
2720 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2721 &ev->bdaddr);
2722
2723 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2724 key->type == HCI_LK_DEBUG_COMBINATION) {
2725 BT_DBG("%s ignoring debug key", hdev->name);
2726 goto not_found;
2727 }
2728
2729 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2730 if (conn) {
2731 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2732 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2733 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2734 goto not_found;
2735 }
2736
2737 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2738 conn->pending_sec_level == BT_SECURITY_HIGH) {
2739 BT_DBG("%s ignoring key unauthenticated for high security",
2740 hdev->name);
2741 goto not_found;
2742 }
2743
2744 conn->key_type = key->type;
2745 conn->pin_length = key->pin_len;
2746 }
2747
2748 bacpy(&cp.bdaddr, &ev->bdaddr);
2749 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2750
2751 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2752
2753 hci_dev_unlock(hdev);
2754
2755 return;
2756
2757 not_found:
2758 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2759 hci_dev_unlock(hdev);
2760 }
2761
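/* Link Key Notification event: record the key type and PIN length on the
 * connection, refresh its disconnect timeout and store the new key when
 * link-key management is enabled.
 */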
2762 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2763 {
2764 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2765 struct hci_conn *conn;
2766 u8 pin_len = 0;
2767
2768 BT_DBG("%s", hdev->name);
2769
2770 hci_dev_lock(hdev);
2771
2772 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2773 if (conn) {
2774 hci_conn_hold(conn);
2775 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2776 pin_len = conn->pin_length;
2777
2778 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2779 conn->key_type = ev->key_type;
2780
2781 hci_conn_put(conn);
2782 }
2783
2784 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2785 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2786 ev->key_type, pin_len);
2787
2788 hci_dev_unlock(hdev);
2789 }
2790
2791 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2792 {
2793 struct hci_ev_clock_offset *ev = (void *) skb->data;
2794 struct hci_conn *conn;
2795
2796 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2797
2798 hci_dev_lock(hdev);
2799
2800 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2801 if (conn && !ev->status) {
2802 struct inquiry_entry *ie;
2803
2804 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2805 if (ie) {
2806 ie->data.clock_offset = ev->clock_offset;
2807 ie->timestamp = jiffies;
2808 }
2809 }
2810
2811 hci_dev_unlock(hdev);
2812 }
2813
2814 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2815 {
2816 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2817 struct hci_conn *conn;
2818
2819 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2820
2821 hci_dev_lock(hdev);
2822
2823 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2824 if (conn && !ev->status)
2825 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2826
2827 hci_dev_unlock(hdev);
2828 }
2829
2830 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2831 {
2832 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2833 struct inquiry_entry *ie;
2834
2835 BT_DBG("%s", hdev->name);
2836
2837 hci_dev_lock(hdev);
2838
2839 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2840 if (ie) {
2841 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2842 ie->timestamp = jiffies;
2843 }
2844
2845 hci_dev_unlock(hdev);
2846 }
2847
2848 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2849 struct sk_buff *skb)
2850 {
2851 struct inquiry_data data;
2852 int num_rsp = *((__u8 *) skb->data);
2853 bool name_known, ssp;
2854
2855 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2856
2857 if (!num_rsp)
2858 return;
2859
2860 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2861 return;
2862
2863 hci_dev_lock(hdev);
2864
2865 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2866 struct inquiry_info_with_rssi_and_pscan_mode *info;
2867 info = (void *) (skb->data + 1);
2868
2869 for (; num_rsp; num_rsp--, info++) {
2870 bacpy(&data.bdaddr, &info->bdaddr);
2871 data.pscan_rep_mode = info->pscan_rep_mode;
2872 data.pscan_period_mode = info->pscan_period_mode;
2873 data.pscan_mode = info->pscan_mode;
2874 memcpy(data.dev_class, info->dev_class, 3);
2875 data.clock_offset = info->clock_offset;
2876 data.rssi = info->rssi;
2877 data.ssp_mode = 0x00;
2878
2879 name_known = hci_inquiry_cache_update(hdev, &data,
2880 false, &ssp);
2881 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2882 info->dev_class, info->rssi,
2883 !name_known, ssp, NULL, 0);
2884 }
2885 } else {
2886 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2887
2888 for (; num_rsp; num_rsp--, info++) {
2889 bacpy(&data.bdaddr, &info->bdaddr);
2890 data.pscan_rep_mode = info->pscan_rep_mode;
2891 data.pscan_period_mode = info->pscan_period_mode;
2892 data.pscan_mode = 0x00;
2893 memcpy(data.dev_class, info->dev_class, 3);
2894 data.clock_offset = info->clock_offset;
2895 data.rssi = info->rssi;
2896 data.ssp_mode = 0x00;
2897 name_known = hci_inquiry_cache_update(hdev, &data,
2898 false, &ssp);
2899 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2900 info->dev_class, info->rssi,
2901 !name_known, ssp, NULL, 0);
2902 }
2903 }
2904
2905 hci_dev_unlock(hdev);
2906 }
2907
2908 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2909 struct sk_buff *skb)
2910 {
2911 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2912 struct hci_conn *conn;
2913
2914 BT_DBG("%s", hdev->name);
2915
2916 hci_dev_lock(hdev);
2917
2918 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2919 if (!conn)
2920 goto unlock;
2921
2922 if (!ev->status && ev->page == 0x01) {
2923 struct inquiry_entry *ie;
2924
2925 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2926 if (ie)
2927 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2928
2929 if (ev->features[0] & LMP_HOST_SSP)
2930 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2931 }
2932
2933 if (conn->state != BT_CONFIG)
2934 goto unlock;
2935
2936 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2937 struct hci_cp_remote_name_req cp;
2938 memset(&cp, 0, sizeof(cp));
2939 bacpy(&cp.bdaddr, &conn->dst);
2940 cp.pscan_rep_mode = 0x02;
2941 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2942 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2943 mgmt_device_connected(hdev, &conn->dst, conn->type,
2944 conn->dst_type, 0, NULL, 0,
2945 conn->dev_class);
2946
2947 if (!hci_outgoing_auth_needed(hdev, conn)) {
2948 conn->state = BT_CONNECTED;
2949 hci_proto_connect_cfm(conn, ev->status);
2950 hci_conn_put(conn);
2951 }
2952
2953 unlock:
2954 hci_dev_unlock(hdev);
2955 }
2956
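/* Synchronous Connection Complete event: finish SCO/eSCO setup. A handful of
 * negotiation failures cause one retry with an adjusted packet type;
 * otherwise the result is confirmed to the SCO layer and failed connections
 * are deleted.
 */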
2957 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2958 struct sk_buff *skb)
2959 {
2960 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2961 struct hci_conn *conn;
2962
2963 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2964
2965 hci_dev_lock(hdev);
2966
2967 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2968 if (!conn) {
2969 if (ev->link_type == ESCO_LINK)
2970 goto unlock;
2971
2972 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2973 if (!conn)
2974 goto unlock;
2975
2976 conn->type = SCO_LINK;
2977 }
2978
2979 switch (ev->status) {
2980 case 0x00:
2981 conn->handle = __le16_to_cpu(ev->handle);
2982 conn->state = BT_CONNECTED;
2983
2984 hci_conn_hold_device(conn);
2985 hci_conn_add_sysfs(conn);
2986 break;
2987
2988 case 0x11: /* Unsupported Feature or Parameter Value */
2989 case 0x1c: /* SCO interval rejected */
2990 case 0x1a: /* Unsupported Remote Feature */
2991 case 0x1f: /* Unspecified error */
2992 if (conn->out && conn->attempt < 2) {
2993 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2994 (hdev->esco_type & EDR_ESCO_MASK);
2995 hci_setup_sync(conn, conn->link->handle);
2996 goto unlock;
2997 }
2998 /* fall through */
2999
3000 default:
3001 conn->state = BT_CLOSED;
3002 break;
3003 }
3004
3005 hci_proto_connect_cfm(conn, ev->status);
3006 if (ev->status)
3007 hci_conn_del(conn);
3008
3009 unlock:
3010 hci_dev_unlock(hdev);
3011 }
3012
3013 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3014 struct sk_buff *skb)
3015 {
3016 struct inquiry_data data;
3017 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3018 int num_rsp = *((__u8 *) skb->data);
3019 size_t eir_len;
3020
3021 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3022
3023 if (!num_rsp)
3024 return;
3025
3026 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3027 return;
3028
3029 hci_dev_lock(hdev);
3030
3031 for (; num_rsp; num_rsp--, info++) {
3032 bool name_known, ssp;
3033
3034 bacpy(&data.bdaddr, &info->bdaddr);
3035 data.pscan_rep_mode = info->pscan_rep_mode;
3036 data.pscan_period_mode = info->pscan_period_mode;
3037 data.pscan_mode = 0x00;
3038 memcpy(data.dev_class, info->dev_class, 3);
3039 data.clock_offset = info->clock_offset;
3040 data.rssi = info->rssi;
3041 data.ssp_mode = 0x01;
3042
3043 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3044 name_known = eir_has_data_type(info->data,
3045 sizeof(info->data),
3046 EIR_NAME_COMPLETE);
3047 else
3048 name_known = true;
3049
3050 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3051 &ssp);
3052 eir_len = eir_get_length(info->data, sizeof(info->data));
3053 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3054 info->dev_class, info->rssi, !name_known,
3055 ssp, info->data, eir_len);
3056 }
3057
3058 hci_dev_unlock(hdev);
3059 }
3060
3061 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3062 struct sk_buff *skb)
3063 {
3064 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3065 struct hci_conn *conn;
3066
3067 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3068 __le16_to_cpu(ev->handle));
3069
3070 hci_dev_lock(hdev);
3071
3072 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3073 if (!conn)
3074 goto unlock;
3075
3076 if (!ev->status)
3077 conn->sec_level = conn->pending_sec_level;
3078
3079 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3080
3081 if (ev->status && conn->state == BT_CONNECTED) {
3082 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3083 hci_conn_put(conn);
3084 goto unlock;
3085 }
3086
3087 if (conn->state == BT_CONFIG) {
3088 if (!ev->status)
3089 conn->state = BT_CONNECTED;
3090
3091 hci_proto_connect_cfm(conn, ev->status);
3092 hci_conn_put(conn);
3093 } else {
3094 hci_auth_cfm(conn, ev->status);
3095
3096 hci_conn_hold(conn);
3097 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3098 hci_conn_put(conn);
3099 }
3100
3101 unlock:
3102 hci_dev_unlock(hdev);
3103 }
3104
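/* Work out the authentication requirements to use in the IO Capability
 * Reply, mirroring the remote side's bonding preference and requiring MITM
 * protection only when both sides' IO capabilities can support it.
 */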
3105 static u8 hci_get_auth_req(struct hci_conn *conn)
3106 {
3107 	/* If the remote side requests dedicated bonding, follow that lead */
3108 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3109 		/* If both the remote and local IO capabilities allow MITM
3110 		 * protection, require it; otherwise don't. */
3111 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3112 return 0x02;
3113 else
3114 return 0x03;
3115 }
3116
3117 	/* If the remote side requests no-bonding, follow that lead */
3118 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3119 return conn->remote_auth | (conn->auth_type & 0x01);
3120
3121 return conn->auth_type;
3122 }
3123
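/* IO Capability Request event: reply with our IO capability, the derived
 * authentication requirements and whether OOB data is available, or send a
 * negative reply when pairing is not currently allowed.
 */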
3124 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3125 {
3126 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3127 struct hci_conn *conn;
3128
3129 BT_DBG("%s", hdev->name);
3130
3131 hci_dev_lock(hdev);
3132
3133 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3134 if (!conn)
3135 goto unlock;
3136
3137 hci_conn_hold(conn);
3138
3139 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3140 goto unlock;
3141
3142 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3143 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3144 struct hci_cp_io_capability_reply cp;
3145
3146 bacpy(&cp.bdaddr, &ev->bdaddr);
3147 		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3148 		 * as KeyboardDisplay is not valid for this reply in the BT spec. */
3149 cp.capability = (conn->io_capability == 0x04) ?
3150 0x01 : conn->io_capability;
3151 conn->auth_type = hci_get_auth_req(conn);
3152 cp.authentication = conn->auth_type;
3153
3154 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3155 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3156 cp.oob_data = 0x01;
3157 else
3158 cp.oob_data = 0x00;
3159
3160 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3161 sizeof(cp), &cp);
3162 } else {
3163 struct hci_cp_io_capability_neg_reply cp;
3164
3165 bacpy(&cp.bdaddr, &ev->bdaddr);
3166 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3167
3168 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3169 sizeof(cp), &cp);
3170 }
3171
3172 unlock:
3173 hci_dev_unlock(hdev);
3174 }
3175
3176 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3177 {
3178 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3179 struct hci_conn *conn;
3180
3181 BT_DBG("%s", hdev->name);
3182
3183 hci_dev_lock(hdev);
3184
3185 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3186 if (!conn)
3187 goto unlock;
3188
3189 conn->remote_cap = ev->capability;
3190 conn->remote_auth = ev->authentication;
3191 if (ev->oob_data)
3192 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3193
3194 unlock:
3195 hci_dev_unlock(hdev);
3196 }
3197
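/* User Confirmation Request event: reject outright when we require MITM
 * protection that the remote device cannot provide, auto-accept (optionally
 * after a configurable delay) when neither side requires MITM and otherwise
 * hand the decision to user space via mgmt.
 */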
3198 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3199 struct sk_buff *skb)
3200 {
3201 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3202 int loc_mitm, rem_mitm, confirm_hint = 0;
3203 struct hci_conn *conn;
3204
3205 BT_DBG("%s", hdev->name);
3206
3207 hci_dev_lock(hdev);
3208
3209 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3210 goto unlock;
3211
3212 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3213 if (!conn)
3214 goto unlock;
3215
3216 loc_mitm = (conn->auth_type & 0x01);
3217 rem_mitm = (conn->remote_auth & 0x01);
3218
3219 /* If we require MITM but the remote device can't provide that
3220 * (it has NoInputNoOutput) then reject the confirmation
3221 * request. The only exception is when we're dedicated bonding
3222 * initiators (connect_cfm_cb set) since then we always have the MITM
3223 * bit set. */
3224 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3225 BT_DBG("Rejecting request: remote device can't provide MITM");
3226 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3227 sizeof(ev->bdaddr), &ev->bdaddr);
3228 goto unlock;
3229 }
3230
3231 	/* If neither side requires MITM protection, auto-accept */
3232 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3233 (!rem_mitm || conn->io_capability == 0x03)) {
3234
3235 		/* If we're not the initiators, request authorization to
3236 		 * proceed from user space (mgmt_user_confirm with
3237 		 * confirm_hint set to 1). */
3238 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3239 BT_DBG("Confirming auto-accept as acceptor");
3240 confirm_hint = 1;
3241 goto confirm;
3242 }
3243
3244 BT_DBG("Auto-accept of user confirmation with %ums delay",
3245 hdev->auto_accept_delay);
3246
3247 if (hdev->auto_accept_delay > 0) {
3248 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3249 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3250 goto unlock;
3251 }
3252
3253 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3254 sizeof(ev->bdaddr), &ev->bdaddr);
3255 goto unlock;
3256 }
3257
3258 confirm:
3259 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3260 confirm_hint);
3261
3262 unlock:
3263 hci_dev_unlock(hdev);
3264 }
3265
3266 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3267 struct sk_buff *skb)
3268 {
3269 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3270
3271 BT_DBG("%s", hdev->name);
3272
3273 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3274 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3275 }
3276
3277 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3278 struct sk_buff *skb)
3279 {
3280 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3281 struct hci_conn *conn;
3282
3283 BT_DBG("%s", hdev->name);
3284
3285 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3286 if (!conn)
3287 return;
3288
3289 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3290 conn->passkey_entered = 0;
3291
3292 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3293 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3294 conn->dst_type, conn->passkey_notify,
3295 conn->passkey_entered);
3296 }
3297
3298 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3299 {
3300 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3301 struct hci_conn *conn;
3302
3303 BT_DBG("%s", hdev->name);
3304
3305 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3306 if (!conn)
3307 return;
3308
3309 switch (ev->type) {
3310 case HCI_KEYPRESS_STARTED:
3311 conn->passkey_entered = 0;
3312 return;
3313
3314 case HCI_KEYPRESS_ENTERED:
3315 conn->passkey_entered++;
3316 break;
3317
3318 case HCI_KEYPRESS_ERASED:
3319 conn->passkey_entered--;
3320 break;
3321
3322 case HCI_KEYPRESS_CLEARED:
3323 conn->passkey_entered = 0;
3324 break;
3325
3326 case HCI_KEYPRESS_COMPLETED:
3327 return;
3328 }
3329
3330 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3331 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3332 conn->dst_type, conn->passkey_notify,
3333 conn->passkey_entered);
3334 }
3335
3336 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3337 struct sk_buff *skb)
3338 {
3339 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3340 struct hci_conn *conn;
3341
3342 BT_DBG("%s", hdev->name);
3343
3344 hci_dev_lock(hdev);
3345
3346 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3347 if (!conn)
3348 goto unlock;
3349
3350 	/* To avoid duplicate auth_failed events to user space we check
3351 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3352 	 * initiated the authentication. As initiator, a traditional
3353 	 * auth_complete event is always produced and it is also mapped
3354 	 * to the mgmt_auth_failed event */
3355 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3356 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3357 ev->status);
3358
3359 hci_conn_put(conn);
3360
3361 unlock:
3362 hci_dev_unlock(hdev);
3363 }
3364
3365 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3366 struct sk_buff *skb)
3367 {
3368 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3369 struct inquiry_entry *ie;
3370
3371 BT_DBG("%s", hdev->name);
3372
3373 hci_dev_lock(hdev);
3374
3375 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3376 if (ie)
3377 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3378
3379 hci_dev_unlock(hdev);
3380 }
3381
3382 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3383 struct sk_buff *skb)
3384 {
3385 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3386 struct oob_data *data;
3387
3388 BT_DBG("%s", hdev->name);
3389
3390 hci_dev_lock(hdev);
3391
3392 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3393 goto unlock;
3394
3395 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3396 if (data) {
3397 struct hci_cp_remote_oob_data_reply cp;
3398
3399 bacpy(&cp.bdaddr, &ev->bdaddr);
3400 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3401 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3402
3403 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3404 &cp);
3405 } else {
3406 struct hci_cp_remote_oob_data_neg_reply cp;
3407
3408 bacpy(&cp.bdaddr, &ev->bdaddr);
3409 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3410 &cp);
3411 }
3412
3413 unlock:
3414 hci_dev_unlock(hdev);
3415 }
3416
3417 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3418 struct sk_buff *skb)
3419 {
3420 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3421 struct hci_conn *hcon, *bredr_hcon;
3422
3423 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3424 ev->status);
3425
3426 hci_dev_lock(hdev);
3427
3428 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3429 if (!hcon) {
3430 hci_dev_unlock(hdev);
3431 return;
3432 }
3433
3434 if (ev->status) {
3435 hci_conn_del(hcon);
3436 hci_dev_unlock(hdev);
3437 return;
3438 }
3439
3440 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3441
3442 hcon->state = BT_CONNECTED;
3443 bacpy(&hcon->dst, &bredr_hcon->dst);
3444
3445 hci_conn_hold(hcon);
3446 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3447 hci_conn_put(hcon);
3448
3449 hci_conn_hold_device(hcon);
3450 hci_conn_add_sysfs(hcon);
3451
3452 amp_physical_cfm(bredr_hcon, hcon);
3453
3454 hci_dev_unlock(hdev);
3455 }
3456
3457 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3458 {
3459 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3460 struct hci_conn *hcon;
3461 struct hci_chan *hchan;
3462 struct amp_mgr *mgr;
3463
3464 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3465 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3466 ev->status);
3467
3468 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3469 if (!hcon)
3470 return;
3471
3472 /* Create AMP hchan */
3473 hchan = hci_chan_create(hcon);
3474 if (!hchan)
3475 return;
3476
3477 hchan->handle = le16_to_cpu(ev->handle);
3478
3479 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3480
3481 mgr = hcon->amp_mgr;
3482 if (mgr && mgr->bredr_chan) {
3483 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3484
3485 l2cap_chan_lock(bredr_chan);
3486
3487 bredr_chan->conn->mtu = hdev->block_mtu;
3488 l2cap_logical_cfm(bredr_chan, hchan, 0);
3489 hci_conn_hold(hcon);
3490
3491 l2cap_chan_unlock(bredr_chan);
3492 }
3493 }
3494
3495 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3496 struct sk_buff *skb)
3497 {
3498 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3499 struct hci_chan *hchan;
3500
3501 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3502 le16_to_cpu(ev->handle), ev->status);
3503
3504 if (ev->status)
3505 return;
3506
3507 hci_dev_lock(hdev);
3508
3509 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3510 if (!hchan)
3511 goto unlock;
3512
3513 amp_destroy_logical_link(hchan, ev->reason);
3514
3515 unlock:
3516 hci_dev_unlock(hdev);
3517 }
3518
3519 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3520 struct sk_buff *skb)
3521 {
3522 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3523 struct hci_conn *hcon;
3524
3525 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3526
3527 if (ev->status)
3528 return;
3529
3530 hci_dev_lock(hdev);
3531
3532 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3533 if (hcon) {
3534 hcon->state = BT_CLOSED;
3535 hci_conn_del(hcon);
3536 }
3537
3538 hci_dev_unlock(hdev);
3539 }
3540
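/* LE Connection Complete event: attach the event to the pending LE
 * connection (creating a new hci_conn for incoming links), report failures
 * through mgmt and otherwise mark the link connected and notify the upper
 * layers.
 */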
3541 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3542 {
3543 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3544 struct hci_conn *conn;
3545
3546 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3547
3548 hci_dev_lock(hdev);
3549
3550 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3551 if (!conn) {
3552 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3553 if (!conn) {
3554 BT_ERR("No memory for new connection");
3555 goto unlock;
3556 }
3557
3558 conn->dst_type = ev->bdaddr_type;
3559
3560 if (ev->role == LE_CONN_ROLE_MASTER) {
3561 conn->out = true;
3562 conn->link_mode |= HCI_LM_MASTER;
3563 }
3564 }
3565
3566 if (ev->status) {
3567 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3568 conn->dst_type, ev->status);
3569 hci_proto_connect_cfm(conn, ev->status);
3570 conn->state = BT_CLOSED;
3571 hci_conn_del(conn);
3572 goto unlock;
3573 }
3574
3575 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3576 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3577 conn->dst_type, 0, NULL, 0, NULL);
3578
3579 conn->sec_level = BT_SECURITY_LOW;
3580 conn->handle = __le16_to_cpu(ev->handle);
3581 conn->state = BT_CONNECTED;
3582
3583 hci_conn_hold_device(conn);
3584 hci_conn_add_sysfs(conn);
3585
3586 hci_proto_connect_cfm(conn, ev->status);
3587
3588 unlock:
3589 hci_dev_unlock(hdev);
3590 }
3591
3592 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3593 {
3594 u8 num_reports = skb->data[0];
3595 void *ptr = &skb->data[1];
3596 s8 rssi;
3597
3598 while (num_reports--) {
3599 struct hci_ev_le_advertising_info *ev = ptr;
3600
3601 rssi = ev->data[ev->length];
3602 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3603 NULL, rssi, 0, 1, ev->data, ev->length);
3604
3605 ptr += sizeof(*ev) + ev->length + 1;
3606 }
3607 }
3608
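/* LE Long Term Key Request event: look up the LTK matching the ediv/rand
 * values from the event and hand it to the controller, raising the security
 * level for authenticated keys; STKs are deleted after use and a negative
 * reply is sent when no key is found.
 */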
3609 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3610 {
3611 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3612 struct hci_cp_le_ltk_reply cp;
3613 struct hci_cp_le_ltk_neg_reply neg;
3614 struct hci_conn *conn;
3615 struct smp_ltk *ltk;
3616
3617 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3618
3619 hci_dev_lock(hdev);
3620
3621 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3622 if (conn == NULL)
3623 goto not_found;
3624
3625 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3626 if (ltk == NULL)
3627 goto not_found;
3628
3629 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3630 cp.handle = cpu_to_le16(conn->handle);
3631
3632 if (ltk->authenticated)
3633 conn->sec_level = BT_SECURITY_HIGH;
3634
3635 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3636
3637 if (ltk->type & HCI_SMP_STK) {
3638 list_del(&ltk->list);
3639 kfree(ltk);
3640 }
3641
3642 hci_dev_unlock(hdev);
3643
3644 return;
3645
3646 not_found:
3647 neg.handle = ev->handle;
3648 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3649 hci_dev_unlock(hdev);
3650 }
3651
3652 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3653 {
3654 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3655
3656 skb_pull(skb, sizeof(*le_ev));
3657
3658 switch (le_ev->subevent) {
3659 case HCI_EV_LE_CONN_COMPLETE:
3660 hci_le_conn_complete_evt(hdev, skb);
3661 break;
3662
3663 case HCI_EV_LE_ADVERTISING_REPORT:
3664 hci_le_adv_report_evt(hdev, skb);
3665 break;
3666
3667 case HCI_EV_LE_LTK_REQ:
3668 hci_le_ltk_request_evt(hdev, skb);
3669 break;
3670
3671 default:
3672 break;
3673 }
3674 }
3675
3676 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3677 {
3678 struct hci_ev_channel_selected *ev = (void *) skb->data;
3679 struct hci_conn *hcon;
3680
3681 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3682
3683 skb_pull(skb, sizeof(*ev));
3684
3685 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3686 if (!hcon)
3687 return;
3688
3689 amp_read_loc_assoc_final_data(hdev, hcon);
3690 }
3691
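/* Main entry point for HCI event packets received from the driver: strip the
 * event header, dispatch to the handler for the event code, then free the
 * skb and update the RX statistics.
 */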
3692 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3693 {
3694 struct hci_event_hdr *hdr = (void *) skb->data;
3695 __u8 event = hdr->evt;
3696
3697 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3698
3699 switch (event) {
3700 case HCI_EV_INQUIRY_COMPLETE:
3701 hci_inquiry_complete_evt(hdev, skb);
3702 break;
3703
3704 case HCI_EV_INQUIRY_RESULT:
3705 hci_inquiry_result_evt(hdev, skb);
3706 break;
3707
3708 case HCI_EV_CONN_COMPLETE:
3709 hci_conn_complete_evt(hdev, skb);
3710 break;
3711
3712 case HCI_EV_CONN_REQUEST:
3713 hci_conn_request_evt(hdev, skb);
3714 break;
3715
3716 case HCI_EV_DISCONN_COMPLETE:
3717 hci_disconn_complete_evt(hdev, skb);
3718 break;
3719
3720 case HCI_EV_AUTH_COMPLETE:
3721 hci_auth_complete_evt(hdev, skb);
3722 break;
3723
3724 case HCI_EV_REMOTE_NAME:
3725 hci_remote_name_evt(hdev, skb);
3726 break;
3727
3728 case HCI_EV_ENCRYPT_CHANGE:
3729 hci_encrypt_change_evt(hdev, skb);
3730 break;
3731
3732 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3733 hci_change_link_key_complete_evt(hdev, skb);
3734 break;
3735
3736 case HCI_EV_REMOTE_FEATURES:
3737 hci_remote_features_evt(hdev, skb);
3738 break;
3739
3740 case HCI_EV_CMD_COMPLETE:
3741 hci_cmd_complete_evt(hdev, skb);
3742 break;
3743
3744 case HCI_EV_CMD_STATUS:
3745 hci_cmd_status_evt(hdev, skb);
3746 break;
3747
3748 case HCI_EV_ROLE_CHANGE:
3749 hci_role_change_evt(hdev, skb);
3750 break;
3751
3752 case HCI_EV_NUM_COMP_PKTS:
3753 hci_num_comp_pkts_evt(hdev, skb);
3754 break;
3755
3756 case HCI_EV_MODE_CHANGE:
3757 hci_mode_change_evt(hdev, skb);
3758 break;
3759
3760 case HCI_EV_PIN_CODE_REQ:
3761 hci_pin_code_request_evt(hdev, skb);
3762 break;
3763
3764 case HCI_EV_LINK_KEY_REQ:
3765 hci_link_key_request_evt(hdev, skb);
3766 break;
3767
3768 case HCI_EV_LINK_KEY_NOTIFY:
3769 hci_link_key_notify_evt(hdev, skb);
3770 break;
3771
3772 case HCI_EV_CLOCK_OFFSET:
3773 hci_clock_offset_evt(hdev, skb);
3774 break;
3775
3776 case HCI_EV_PKT_TYPE_CHANGE:
3777 hci_pkt_type_change_evt(hdev, skb);
3778 break;
3779
3780 case HCI_EV_PSCAN_REP_MODE:
3781 hci_pscan_rep_mode_evt(hdev, skb);
3782 break;
3783
3784 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3785 hci_inquiry_result_with_rssi_evt(hdev, skb);
3786 break;
3787
3788 case HCI_EV_REMOTE_EXT_FEATURES:
3789 hci_remote_ext_features_evt(hdev, skb);
3790 break;
3791
3792 case HCI_EV_SYNC_CONN_COMPLETE:
3793 hci_sync_conn_complete_evt(hdev, skb);
3794 break;
3795
3796 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3797 hci_extended_inquiry_result_evt(hdev, skb);
3798 break;
3799
3800 case HCI_EV_KEY_REFRESH_COMPLETE:
3801 hci_key_refresh_complete_evt(hdev, skb);
3802 break;
3803
3804 case HCI_EV_IO_CAPA_REQUEST:
3805 hci_io_capa_request_evt(hdev, skb);
3806 break;
3807
3808 case HCI_EV_IO_CAPA_REPLY:
3809 hci_io_capa_reply_evt(hdev, skb);
3810 break;
3811
3812 case HCI_EV_USER_CONFIRM_REQUEST:
3813 hci_user_confirm_request_evt(hdev, skb);
3814 break;
3815
3816 case HCI_EV_USER_PASSKEY_REQUEST:
3817 hci_user_passkey_request_evt(hdev, skb);
3818 break;
3819
3820 case HCI_EV_USER_PASSKEY_NOTIFY:
3821 hci_user_passkey_notify_evt(hdev, skb);
3822 break;
3823
3824 case HCI_EV_KEYPRESS_NOTIFY:
3825 hci_keypress_notify_evt(hdev, skb);
3826 break;
3827
3828 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3829 hci_simple_pair_complete_evt(hdev, skb);
3830 break;
3831
3832 case HCI_EV_REMOTE_HOST_FEATURES:
3833 hci_remote_host_features_evt(hdev, skb);
3834 break;
3835
3836 case HCI_EV_LE_META:
3837 hci_le_meta_evt(hdev, skb);
3838 break;
3839
3840 case HCI_EV_CHANNEL_SELECTED:
3841 hci_chan_selected_evt(hdev, skb);
3842 break;
3843
3844 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3845 hci_remote_oob_data_request_evt(hdev, skb);
3846 break;
3847
3848 case HCI_EV_PHY_LINK_COMPLETE:
3849 hci_phy_link_complete_evt(hdev, skb);
3850 break;
3851
3852 case HCI_EV_LOGICAL_LINK_COMPLETE:
3853 hci_loglink_complete_evt(hdev, skb);
3854 break;
3855
3856 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3857 hci_disconn_loglink_complete_evt(hdev, skb);
3858 break;
3859
3860 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3861 hci_disconn_phylink_complete_evt(hdev, skb);
3862 break;
3863
3864 case HCI_EV_NUM_COMP_BLOCKS:
3865 hci_num_comp_blocks_evt(hdev, skb);
3866 break;
3867
3868 default:
3869 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3870 break;
3871 }
3872
3873 kfree_skb(skb);
3874 hdev->stat.evt_rx++;
3875 }