Bluetooth: Fix hci_inquiry ioctl usage
net/bluetooth/hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
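/* Command Complete handler for HCI_OP_INQUIRY_CANCEL. On failure the
 * error is forwarded to the mgmt layer; on success the HCI_INQUIRY
 * flag is cleared, any task waiting on that bit is woken and discovery
 * is marked as stopped before pending connections are re-checked.
 */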
37 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
38 {
39 __u8 status = *((__u8 *) skb->data);
40
41 BT_DBG("%s status 0x%2.2x", hdev->name, status);
42
43 if (status) {
44 hci_dev_lock(hdev);
45 mgmt_stop_discovery_failed(hdev, status);
46 hci_dev_unlock(hdev);
47 return;
48 }
49
50 clear_bit(HCI_INQUIRY, &hdev->flags);
51 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
52 wake_up_bit(&hdev->flags, HCI_INQUIRY);
53
54 hci_dev_lock(hdev);
55 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
56 hci_dev_unlock(hdev);
57
58 hci_conn_check_pending(hdev);
59 }
60
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85 }
86
87 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 struct sk_buff *skb)
89 {
90 BT_DBG("%s", hdev->name);
91 }
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157 }
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161 {
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166 if (rp->status)
167 return;
168
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
171
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
174 {
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
177
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
183
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 /* Reset all non-persistent flags */
197 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
198
199 hdev->discovery.state = DISCOVERY_STOPPED;
200 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
201 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
202
203 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
204 hdev->adv_data_len = 0;
205 }
206
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
208 {
209 __u8 status = *((__u8 *) skb->data);
210 void *sent;
211
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
213
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
215 if (!sent)
216 return;
217
218 hci_dev_lock(hdev);
219
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
222 else if (!status)
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
224
225 hci_dev_unlock(hdev);
226 }
227
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
259 }
260
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
263 }
264
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266 {
267 __u8 status = *((__u8 *) skb->data);
268 void *sent;
269
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
271
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 if (!sent)
274 return;
275
276 if (!status) {
277 __u8 param = *((__u8 *) sent);
278
279 if (param)
280 set_bit(HCI_ENCRYPT, &hdev->flags);
281 else
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
283 }
284 }
285
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287 {
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
290 void *sent;
291
292 BT_DBG("%s status 0x%2.2x", hdev->name, status);
293
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
295 if (!sent)
296 return;
297
298 param = *((__u8 *) sent);
299
300 hci_dev_lock(hdev);
301
302 if (status) {
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
305 goto done;
306 }
307
308 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
309 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
310
311 if (param & SCAN_INQUIRY) {
312 set_bit(HCI_ISCAN, &hdev->flags);
313 if (!old_iscan)
314 mgmt_discoverable(hdev, 1);
315 if (hdev->discov_timeout > 0) {
316 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
317 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
318 to);
319 }
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
322
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
325 if (!old_pscan)
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
329
330 done:
331 hci_dev_unlock(hdev);
332 }
333
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339
340 if (rp->status)
341 return;
342
343 memcpy(hdev->dev_class, rp->dev_class, 3);
344
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 __u8 status = *((__u8 *) skb->data);
352 void *sent;
353
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 if (!sent)
358 return;
359
360 hci_dev_lock(hdev);
361
362 if (status == 0)
363 memcpy(hdev->dev_class, sent, 3);
364
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
367
368 hci_dev_unlock(hdev);
369 }
370
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 __u16 setting;
375
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377
378 if (rp->status)
379 return;
380
381 setting = __le16_to_cpu(rp->voice_setting);
382
383 if (hdev->voice_setting == setting)
384 return;
385
386 hdev->voice_setting = setting;
387
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389
390 if (hdev->notify)
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 struct sk_buff *skb)
396 {
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
400
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
402
403 if (status)
404 return;
405
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
409
410 setting = get_unaligned_le16(sent);
411
412 if (hdev->voice_setting == setting)
413 return;
414
415 hdev->voice_setting = setting;
416
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422
423 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
424 {
425 __u8 status = *((__u8 *) skb->data);
426 struct hci_cp_write_ssp_mode *sent;
427
428 BT_DBG("%s status 0x%2.2x", hdev->name, status);
429
430 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
431 if (!sent)
432 return;
433
434 if (!status) {
435 if (sent->mode)
436 hdev->host_features[0] |= LMP_HOST_SSP;
437 else
438 hdev->host_features[0] &= ~LMP_HOST_SSP;
439 }
440
441 if (test_bit(HCI_MGMT, &hdev->dev_flags))
442 mgmt_ssp_enable_complete(hdev, sent->mode, status);
443 else if (!status) {
444 if (sent->mode)
445 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
446 else
447 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
448 }
449 }
450
451 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
452 {
453 struct hci_rp_read_local_version *rp = (void *) skb->data;
454
455 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
456
457 if (rp->status)
458 return;
459
460 hdev->hci_ver = rp->hci_ver;
461 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
462 hdev->lmp_ver = rp->lmp_ver;
463 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
464 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
465
466 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
467 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
468 }
469
470 static void hci_cc_read_local_commands(struct hci_dev *hdev,
471 struct sk_buff *skb)
472 {
473 struct hci_rp_read_local_commands *rp = (void *) skb->data;
474
475 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
476
477 if (!rp->status)
478 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
479 }
480
481 static void hci_cc_read_local_features(struct hci_dev *hdev,
482 struct sk_buff *skb)
483 {
484 struct hci_rp_read_local_features *rp = (void *) skb->data;
485
486 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
487
488 if (rp->status)
489 return;
490
491 memcpy(hdev->features, rp->features, 8);
492
493 /* Adjust default settings according to features
494 * supported by device. */
495
496 if (hdev->features[0] & LMP_3SLOT)
497 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
498
499 if (hdev->features[0] & LMP_5SLOT)
500 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
501
502 if (hdev->features[1] & LMP_HV2) {
503 hdev->pkt_type |= (HCI_HV2);
504 hdev->esco_type |= (ESCO_HV2);
505 }
506
507 if (hdev->features[1] & LMP_HV3) {
508 hdev->pkt_type |= (HCI_HV3);
509 hdev->esco_type |= (ESCO_HV3);
510 }
511
512 if (lmp_esco_capable(hdev))
513 hdev->esco_type |= (ESCO_EV3);
514
515 if (hdev->features[4] & LMP_EV4)
516 hdev->esco_type |= (ESCO_EV4);
517
518 if (hdev->features[4] & LMP_EV5)
519 hdev->esco_type |= (ESCO_EV5);
520
521 if (hdev->features[5] & LMP_EDR_ESCO_2M)
522 hdev->esco_type |= (ESCO_2EV3);
523
524 if (hdev->features[5] & LMP_EDR_ESCO_3M)
525 hdev->esco_type |= (ESCO_3EV3);
526
527 if (hdev->features[5] & LMP_EDR_3S_ESCO)
528 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
529
530 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
531 hdev->features[0], hdev->features[1],
532 hdev->features[2], hdev->features[3],
533 hdev->features[4], hdev->features[5],
534 hdev->features[6], hdev->features[7]);
535 }
536
537 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
538 struct sk_buff *skb)
539 {
540 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
541
542 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
543
544 if (rp->status)
545 return;
546
547 switch (rp->page) {
548 case 0:
549 memcpy(hdev->features, rp->features, 8);
550 break;
551 case 1:
552 memcpy(hdev->host_features, rp->features, 8);
553 break;
554 }
555 }
556
557 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
558 struct sk_buff *skb)
559 {
560 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
561
562 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
563
564 if (!rp->status)
565 hdev->flow_ctl_mode = rp->mode;
566 }
567
568 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
569 {
570 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
571
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573
574 if (rp->status)
575 return;
576
577 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
578 hdev->sco_mtu = rp->sco_mtu;
579 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
580 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
581
582 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
583 hdev->sco_mtu = 64;
584 hdev->sco_pkts = 8;
585 }
586
587 hdev->acl_cnt = hdev->acl_pkts;
588 hdev->sco_cnt = hdev->sco_pkts;
589
590 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
591 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
592 }
593
594 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
595 {
596 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
597
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599
600 if (!rp->status)
601 bacpy(&hdev->bdaddr, &rp->bdaddr);
602 }
603
604 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
605 struct sk_buff *skb)
606 {
607 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
608
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610
611 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
612 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
613 hdev->page_scan_window = __le16_to_cpu(rp->window);
614 }
615 }
616
617 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
618 struct sk_buff *skb)
619 {
620 u8 status = *((u8 *) skb->data);
621 struct hci_cp_write_page_scan_activity *sent;
622
623 BT_DBG("%s status 0x%2.2x", hdev->name, status);
624
625 if (status)
626 return;
627
628 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
629 if (!sent)
630 return;
631
632 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
633 hdev->page_scan_window = __le16_to_cpu(sent->window);
634 }
635
636 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
637 struct sk_buff *skb)
638 {
639 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
640
641 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
642
643 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
644 hdev->page_scan_type = rp->type;
645 }
646
647 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
648 struct sk_buff *skb)
649 {
650 u8 status = *((u8 *) skb->data);
651 u8 *type;
652
653 BT_DBG("%s status 0x%2.2x", hdev->name, status);
654
655 if (status)
656 return;
657
658 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
659 if (type)
660 hdev->page_scan_type = *type;
661 }
662
663 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
664 struct sk_buff *skb)
665 {
666 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
667
668 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
669
670 if (rp->status)
671 return;
672
673 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
674 hdev->block_len = __le16_to_cpu(rp->block_len);
675 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
676
677 hdev->block_cnt = hdev->num_blocks;
678
679 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
680 hdev->block_cnt, hdev->block_len);
681 }
682
683 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
684 struct sk_buff *skb)
685 {
686 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
687
688 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
689
690 if (rp->status)
691 goto a2mp_rsp;
692
693 hdev->amp_status = rp->amp_status;
694 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
695 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
696 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
697 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
698 hdev->amp_type = rp->amp_type;
699 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
700 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
701 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
702 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
703
704 a2mp_rsp:
705 a2mp_send_getinfo_rsp(hdev);
706 }
707
708 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
709 struct sk_buff *skb)
710 {
711 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
712 struct amp_assoc *assoc = &hdev->loc_assoc;
713 size_t rem_len, frag_len;
714
715 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716
717 if (rp->status)
718 goto a2mp_rsp;
719
720 frag_len = skb->len - sizeof(*rp);
721 rem_len = __le16_to_cpu(rp->rem_len);
722
723 if (rem_len > frag_len) {
724 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
725
726 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
727 assoc->offset += frag_len;
728
729 /* Read other fragments */
730 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
731
732 return;
733 }
734
735 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
736 assoc->len = assoc->offset + rem_len;
737 assoc->offset = 0;
738
739 a2mp_rsp:
740 /* Send A2MP Rsp when all fragments are received */
741 a2mp_send_getampassoc_rsp(hdev, rp->status);
742 a2mp_send_create_phy_link_req(hdev, rp->status);
743 }
744
745 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
746 struct sk_buff *skb)
747 {
748 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
749
750 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
751
752 if (!rp->status)
753 hdev->inq_tx_power = rp->tx_power;
754 }
755
756 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
757 {
758 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
759 struct hci_cp_pin_code_reply *cp;
760 struct hci_conn *conn;
761
762 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
763
764 hci_dev_lock(hdev);
765
766 if (test_bit(HCI_MGMT, &hdev->dev_flags))
767 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
768
769 if (rp->status)
770 goto unlock;
771
772 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
773 if (!cp)
774 goto unlock;
775
776 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
777 if (conn)
778 conn->pin_length = cp->pin_len;
779
780 unlock:
781 hci_dev_unlock(hdev);
782 }
783
784 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
785 {
786 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
787
788 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
789
790 hci_dev_lock(hdev);
791
792 if (test_bit(HCI_MGMT, &hdev->dev_flags))
793 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
794 rp->status);
795
796 hci_dev_unlock(hdev);
797 }
798
799 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
800 struct sk_buff *skb)
801 {
802 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
803
804 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
805
806 if (rp->status)
807 return;
808
809 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
810 hdev->le_pkts = rp->le_max_pkt;
811
812 hdev->le_cnt = hdev->le_pkts;
813
814 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
815 }
816
817 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
818 struct sk_buff *skb)
819 {
820 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
821
822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824 if (!rp->status)
825 memcpy(hdev->le_features, rp->features, 8);
826 }
827
828 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
829 struct sk_buff *skb)
830 {
831 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
832
833 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834
835 if (!rp->status)
836 hdev->adv_tx_power = rp->tx_power;
837 }
838
839 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
840 {
841 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
842
843 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
844
845 hci_dev_lock(hdev);
846
847 if (test_bit(HCI_MGMT, &hdev->dev_flags))
848 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
849 rp->status);
850
851 hci_dev_unlock(hdev);
852 }
853
854 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
855 struct sk_buff *skb)
856 {
857 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
858
859 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
860
861 hci_dev_lock(hdev);
862
863 if (test_bit(HCI_MGMT, &hdev->dev_flags))
864 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
865 ACL_LINK, 0, rp->status);
866
867 hci_dev_unlock(hdev);
868 }
869
870 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
871 {
872 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
873
874 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
875
876 hci_dev_lock(hdev);
877
878 if (test_bit(HCI_MGMT, &hdev->dev_flags))
879 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
880 0, rp->status);
881
882 hci_dev_unlock(hdev);
883 }
884
885 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
886 struct sk_buff *skb)
887 {
888 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
889
890 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
891
892 hci_dev_lock(hdev);
893
894 if (test_bit(HCI_MGMT, &hdev->dev_flags))
895 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
896 ACL_LINK, 0, rp->status);
897
898 hci_dev_unlock(hdev);
899 }
900
901 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
902 struct sk_buff *skb)
903 {
904 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
905
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907
908 hci_dev_lock(hdev);
909 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
910 rp->randomizer, rp->status);
911 hci_dev_unlock(hdev);
912 }
913
914 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
915 {
916 __u8 *sent, status = *((__u8 *) skb->data);
917
918 BT_DBG("%s status 0x%2.2x", hdev->name, status);
919
920 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
921 if (!sent)
922 return;
923
924 hci_dev_lock(hdev);
925
926 if (!status) {
927 if (*sent)
928 set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
929 else
930 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
931 }
932
933 if (!test_bit(HCI_INIT, &hdev->flags)) {
934 struct hci_request req;
935
936 hci_req_init(&req, hdev);
937 hci_update_ad(&req);
938 hci_req_run(&req, NULL);
939 }
940
941 hci_dev_unlock(hdev);
942 }
943
944 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
945 {
946 __u8 status = *((__u8 *) skb->data);
947
948 BT_DBG("%s status 0x%2.2x", hdev->name, status);
949
950 if (status) {
951 hci_dev_lock(hdev);
952 mgmt_start_discovery_failed(hdev, status);
953 hci_dev_unlock(hdev);
954 return;
955 }
956 }
957
958 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
959 struct sk_buff *skb)
960 {
961 struct hci_cp_le_set_scan_enable *cp;
962 __u8 status = *((__u8 *) skb->data);
963
964 BT_DBG("%s status 0x%2.2x", hdev->name, status);
965
966 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
967 if (!cp)
968 return;
969
970 switch (cp->enable) {
971 case LE_SCANNING_ENABLED:
972 if (status) {
973 hci_dev_lock(hdev);
974 mgmt_start_discovery_failed(hdev, status);
975 hci_dev_unlock(hdev);
976 return;
977 }
978
979 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
980
981 hci_dev_lock(hdev);
982 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
983 hci_dev_unlock(hdev);
984 break;
985
986 case LE_SCANNING_DISABLED:
987 if (status) {
988 hci_dev_lock(hdev);
989 mgmt_stop_discovery_failed(hdev, status);
990 hci_dev_unlock(hdev);
991 return;
992 }
993
994 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
995
996 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
997 hdev->discovery.state == DISCOVERY_FINDING) {
998 mgmt_interleaved_discovery(hdev);
999 } else {
1000 hci_dev_lock(hdev);
1001 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1002 hci_dev_unlock(hdev);
1003 }
1004
1005 break;
1006
1007 default:
1008 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1009 break;
1010 }
1011 }
1012
1013 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1014 struct sk_buff *skb)
1015 {
1016 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1017
1018 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1019
1020 if (!rp->status)
1021 hdev->le_white_list_size = rp->size;
1022 }
1023
1024 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1025 struct sk_buff *skb)
1026 {
1027 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030
1031 if (!rp->status)
1032 memcpy(hdev->le_states, rp->le_states, 8);
1033 }
1034
1035 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1036 struct sk_buff *skb)
1037 {
1038 struct hci_cp_write_le_host_supported *sent;
1039 __u8 status = *((__u8 *) skb->data);
1040
1041 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1042
1043 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1044 if (!sent)
1045 return;
1046
1047 if (!status) {
1048 if (sent->le)
1049 hdev->host_features[0] |= LMP_HOST_LE;
1050 else
1051 hdev->host_features[0] &= ~LMP_HOST_LE;
1052
1053 if (sent->simul)
1054 hdev->host_features[0] |= LMP_HOST_LE_BREDR;
1055 else
1056 hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
1057 }
1058
1059 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1060 !test_bit(HCI_INIT, &hdev->flags))
1061 mgmt_le_enable_complete(hdev, sent->le, status);
1062 }
1063
1064 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1065 struct sk_buff *skb)
1066 {
1067 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1068
1069 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1070 hdev->name, rp->status, rp->phy_handle);
1071
1072 if (rp->status)
1073 return;
1074
1075 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1076 }
1077
1078 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1079 {
1080 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1081
1082 if (status) {
1083 hci_conn_check_pending(hdev);
1084 hci_dev_lock(hdev);
1085 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1086 mgmt_start_discovery_failed(hdev, status);
1087 hci_dev_unlock(hdev);
1088 return;
1089 }
1090
1091 set_bit(HCI_INQUIRY, &hdev->flags);
1092
1093 hci_dev_lock(hdev);
1094 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1095 hci_dev_unlock(hdev);
1096 }
1097
1098 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1099 {
1100 struct hci_cp_create_conn *cp;
1101 struct hci_conn *conn;
1102
1103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1104
1105 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1106 if (!cp)
1107 return;
1108
1109 hci_dev_lock(hdev);
1110
1111 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1112
1113 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1114
1115 if (status) {
1116 if (conn && conn->state == BT_CONNECT) {
1117 if (status != 0x0c || conn->attempt > 2) {
1118 conn->state = BT_CLOSED;
1119 hci_proto_connect_cfm(conn, status);
1120 hci_conn_del(conn);
1121 } else
1122 conn->state = BT_CONNECT2;
1123 }
1124 } else {
1125 if (!conn) {
1126 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1127 if (conn) {
1128 conn->out = true;
1129 conn->link_mode |= HCI_LM_MASTER;
1130 } else
1131 BT_ERR("No memory for new connection");
1132 }
1133 }
1134
1135 hci_dev_unlock(hdev);
1136 }
1137
1138 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1139 {
1140 struct hci_cp_add_sco *cp;
1141 struct hci_conn *acl, *sco;
1142 __u16 handle;
1143
1144 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1145
1146 if (!status)
1147 return;
1148
1149 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1150 if (!cp)
1151 return;
1152
1153 handle = __le16_to_cpu(cp->handle);
1154
1155 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1156
1157 hci_dev_lock(hdev);
1158
1159 acl = hci_conn_hash_lookup_handle(hdev, handle);
1160 if (acl) {
1161 sco = acl->link;
1162 if (sco) {
1163 sco->state = BT_CLOSED;
1164
1165 hci_proto_connect_cfm(sco, status);
1166 hci_conn_del(sco);
1167 }
1168 }
1169
1170 hci_dev_unlock(hdev);
1171 }
1172
1173 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1174 {
1175 struct hci_cp_auth_requested *cp;
1176 struct hci_conn *conn;
1177
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1179
1180 if (!status)
1181 return;
1182
1183 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1184 if (!cp)
1185 return;
1186
1187 hci_dev_lock(hdev);
1188
1189 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1190 if (conn) {
1191 if (conn->state == BT_CONFIG) {
1192 hci_proto_connect_cfm(conn, status);
1193 hci_conn_put(conn);
1194 }
1195 }
1196
1197 hci_dev_unlock(hdev);
1198 }
1199
1200 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1201 {
1202 struct hci_cp_set_conn_encrypt *cp;
1203 struct hci_conn *conn;
1204
1205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1206
1207 if (!status)
1208 return;
1209
1210 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1211 if (!cp)
1212 return;
1213
1214 hci_dev_lock(hdev);
1215
1216 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1217 if (conn) {
1218 if (conn->state == BT_CONFIG) {
1219 hci_proto_connect_cfm(conn, status);
1220 hci_conn_put(conn);
1221 }
1222 }
1223
1224 hci_dev_unlock(hdev);
1225 }
1226
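/* Decide whether an outgoing connection still in BT_CONFIG needs an
 * explicit Authentication Request. Authentication is skipped for SDP
 * level security and for non-SSP links unless MITM protection or a
 * HIGH security level was requested.
 */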
1227 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1228 struct hci_conn *conn)
1229 {
1230 if (conn->state != BT_CONFIG || !conn->out)
1231 return 0;
1232
1233 if (conn->pending_sec_level == BT_SECURITY_SDP)
1234 return 0;
1235
1236 /* Only request authentication for SSP connections or non-SSP
1237 * devices with sec_level HIGH or if MITM protection is requested */
1238 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1239 conn->pending_sec_level != BT_SECURITY_HIGH)
1240 return 0;
1241
1242 return 1;
1243 }
1244
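/* Issue a Remote Name Request for a cached inquiry entry, reusing the
 * page scan parameters and clock offset learned during inquiry.
 */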
1245 static int hci_resolve_name(struct hci_dev *hdev,
1246 struct inquiry_entry *e)
1247 {
1248 struct hci_cp_remote_name_req cp;
1249
1250 memset(&cp, 0, sizeof(cp));
1251
1252 bacpy(&cp.bdaddr, &e->data.bdaddr);
1253 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1254 cp.pscan_mode = e->data.pscan_mode;
1255 cp.clock_offset = e->data.clock_offset;
1256
1257 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1258 }
1259
1260 static bool hci_resolve_next_name(struct hci_dev *hdev)
1261 {
1262 struct discovery_state *discov = &hdev->discovery;
1263 struct inquiry_entry *e;
1264
1265 if (list_empty(&discov->resolve))
1266 return false;
1267
1268 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1269 if (!e)
1270 return false;
1271
1272 if (hci_resolve_name(hdev, e) == 0) {
1273 e->name_state = NAME_PENDING;
1274 return true;
1275 }
1276
1277 return false;
1278 }
1279
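/* Called on Remote Name Request completion: report the resolved name
 * (or its absence) to the mgmt layer, update the inquiry cache entry
 * and either start resolution of the next pending name or mark
 * discovery as stopped.
 */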
1280 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1281 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1282 {
1283 struct discovery_state *discov = &hdev->discovery;
1284 struct inquiry_entry *e;
1285
1286 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1287 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1288 name_len, conn->dev_class);
1289
1290 if (discov->state == DISCOVERY_STOPPED)
1291 return;
1292
1293 if (discov->state == DISCOVERY_STOPPING)
1294 goto discov_complete;
1295
1296 if (discov->state != DISCOVERY_RESOLVING)
1297 return;
1298
1299 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1300 /* If the device was not found in the list of devices whose names are
1301 * pending, there is no need to continue resolving the next name, as that
1302 * will be done upon receiving another Remote Name Request Complete
1303 * Event. */
1304 if (!e)
1305 return;
1306
1307 list_del(&e->list);
1308 if (name) {
1309 e->name_state = NAME_KNOWN;
1310 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1311 e->data.rssi, name, name_len);
1312 } else {
1313 e->name_state = NAME_NOT_KNOWN;
1314 }
1315
1316 if (hci_resolve_next_name(hdev))
1317 return;
1318
1319 discov_complete:
1320 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1321 }
1322
1323 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1324 {
1325 struct hci_cp_remote_name_req *cp;
1326 struct hci_conn *conn;
1327
1328 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1329
1330 /* If successful, wait for the name req complete event before
1331 * checking for the need to do authentication */
1332 if (!status)
1333 return;
1334
1335 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1336 if (!cp)
1337 return;
1338
1339 hci_dev_lock(hdev);
1340
1341 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1342
1343 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1344 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1345
1346 if (!conn)
1347 goto unlock;
1348
1349 if (!hci_outgoing_auth_needed(hdev, conn))
1350 goto unlock;
1351
1352 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1353 struct hci_cp_auth_requested cp;
1354 cp.handle = __cpu_to_le16(conn->handle);
1355 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1356 }
1357
1358 unlock:
1359 hci_dev_unlock(hdev);
1360 }
1361
1362 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1363 {
1364 struct hci_cp_read_remote_features *cp;
1365 struct hci_conn *conn;
1366
1367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1368
1369 if (!status)
1370 return;
1371
1372 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1373 if (!cp)
1374 return;
1375
1376 hci_dev_lock(hdev);
1377
1378 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1379 if (conn) {
1380 if (conn->state == BT_CONFIG) {
1381 hci_proto_connect_cfm(conn, status);
1382 hci_conn_put(conn);
1383 }
1384 }
1385
1386 hci_dev_unlock(hdev);
1387 }
1388
1389 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1390 {
1391 struct hci_cp_read_remote_ext_features *cp;
1392 struct hci_conn *conn;
1393
1394 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1395
1396 if (!status)
1397 return;
1398
1399 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1400 if (!cp)
1401 return;
1402
1403 hci_dev_lock(hdev);
1404
1405 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1406 if (conn) {
1407 if (conn->state == BT_CONFIG) {
1408 hci_proto_connect_cfm(conn, status);
1409 hci_conn_put(conn);
1410 }
1411 }
1412
1413 hci_dev_unlock(hdev);
1414 }
1415
1416 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1417 {
1418 struct hci_cp_setup_sync_conn *cp;
1419 struct hci_conn *acl, *sco;
1420 __u16 handle;
1421
1422 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1423
1424 if (!status)
1425 return;
1426
1427 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1428 if (!cp)
1429 return;
1430
1431 handle = __le16_to_cpu(cp->handle);
1432
1433 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1434
1435 hci_dev_lock(hdev);
1436
1437 acl = hci_conn_hash_lookup_handle(hdev, handle);
1438 if (acl) {
1439 sco = acl->link;
1440 if (sco) {
1441 sco->state = BT_CLOSED;
1442
1443 hci_proto_connect_cfm(sco, status);
1444 hci_conn_del(sco);
1445 }
1446 }
1447
1448 hci_dev_unlock(hdev);
1449 }
1450
1451 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1452 {
1453 struct hci_cp_sniff_mode *cp;
1454 struct hci_conn *conn;
1455
1456 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1457
1458 if (!status)
1459 return;
1460
1461 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1462 if (!cp)
1463 return;
1464
1465 hci_dev_lock(hdev);
1466
1467 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1468 if (conn) {
1469 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1470
1471 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1472 hci_sco_setup(conn, status);
1473 }
1474
1475 hci_dev_unlock(hdev);
1476 }
1477
1478 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1479 {
1480 struct hci_cp_exit_sniff_mode *cp;
1481 struct hci_conn *conn;
1482
1483 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1484
1485 if (!status)
1486 return;
1487
1488 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1489 if (!cp)
1490 return;
1491
1492 hci_dev_lock(hdev);
1493
1494 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1495 if (conn) {
1496 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1497
1498 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1499 hci_sco_setup(conn, status);
1500 }
1501
1502 hci_dev_unlock(hdev);
1503 }
1504
1505 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1506 {
1507 struct hci_cp_disconnect *cp;
1508 struct hci_conn *conn;
1509
1510 if (!status)
1511 return;
1512
1513 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1514 if (!cp)
1515 return;
1516
1517 hci_dev_lock(hdev);
1518
1519 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1520 if (conn)
1521 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1522 conn->dst_type, status);
1523
1524 hci_dev_unlock(hdev);
1525 }
1526
1527 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1528 {
1529 struct hci_conn *conn;
1530
1531 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1532
1533 if (status) {
1534 hci_dev_lock(hdev);
1535
1536 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1537 if (!conn) {
1538 hci_dev_unlock(hdev);
1539 return;
1540 }
1541
1542 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1543
1544 conn->state = BT_CLOSED;
1545 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1546 conn->dst_type, status);
1547 hci_proto_connect_cfm(conn, status);
1548 hci_conn_del(conn);
1549
1550 hci_dev_unlock(hdev);
1551 }
1552 }
1553
1554 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1555 {
1556 struct hci_cp_create_phy_link *cp;
1557
1558 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1559
1560 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1561 if (!cp)
1562 return;
1563
1564 hci_dev_lock(hdev);
1565
1566 if (status) {
1567 struct hci_conn *hcon;
1568
1569 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1570 if (hcon)
1571 hci_conn_del(hcon);
1572 } else {
1573 amp_write_remote_assoc(hdev, cp->phy_handle);
1574 }
1575
1576 hci_dev_unlock(hdev);
1577 }
1578
1579 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1580 {
1581 struct hci_cp_accept_phy_link *cp;
1582
1583 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1584
1585 if (status)
1586 return;
1587
1588 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1589 if (!cp)
1590 return;
1591
1592 amp_write_remote_assoc(hdev, cp->phy_handle);
1593 }
1594
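/* Inquiry Complete event: clear HCI_INQUIRY, wake any task waiting on
 * that bit and, for mgmt-driven discovery, move on to name resolution
 * or set the state back to stopped.
 */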
1595 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1596 {
1597 __u8 status = *((__u8 *) skb->data);
1598 struct discovery_state *discov = &hdev->discovery;
1599 struct inquiry_entry *e;
1600
1601 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1602
1603 hci_conn_check_pending(hdev);
1604
1605 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1606 return;
1607
1608 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1609 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1610
1611 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1612 return;
1613
1614 hci_dev_lock(hdev);
1615
1616 if (discov->state != DISCOVERY_FINDING)
1617 goto unlock;
1618
1619 if (list_empty(&discov->resolve)) {
1620 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1621 goto unlock;
1622 }
1623
1624 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1625 if (e && hci_resolve_name(hdev, e) == 0) {
1626 e->name_state = NAME_PENDING;
1627 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1628 } else {
1629 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1630 }
1631
1632 unlock:
1633 hci_dev_unlock(hdev);
1634 }
1635
1636 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1637 {
1638 struct inquiry_data data;
1639 struct inquiry_info *info = (void *) (skb->data + 1);
1640 int num_rsp = *((__u8 *) skb->data);
1641
1642 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1643
1644 if (!num_rsp)
1645 return;
1646
1647 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1648 return;
1649
1650 hci_dev_lock(hdev);
1651
1652 for (; num_rsp; num_rsp--, info++) {
1653 bool name_known, ssp;
1654
1655 bacpy(&data.bdaddr, &info->bdaddr);
1656 data.pscan_rep_mode = info->pscan_rep_mode;
1657 data.pscan_period_mode = info->pscan_period_mode;
1658 data.pscan_mode = info->pscan_mode;
1659 memcpy(data.dev_class, info->dev_class, 3);
1660 data.clock_offset = info->clock_offset;
1661 data.rssi = 0x00;
1662 data.ssp_mode = 0x00;
1663
1664 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1665 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1666 info->dev_class, 0, !name_known, ssp, NULL,
1667 0);
1668 }
1669
1670 hci_dev_unlock(hdev);
1671 }
1672
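/* Connection Complete event: bind the new handle to its hci_conn, set
 * up link mode and disconnect timeout, start remote feature discovery
 * for ACL links and report failed connections to the mgmt layer.
 */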
1673 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1674 {
1675 struct hci_ev_conn_complete *ev = (void *) skb->data;
1676 struct hci_conn *conn;
1677
1678 BT_DBG("%s", hdev->name);
1679
1680 hci_dev_lock(hdev);
1681
1682 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1683 if (!conn) {
1684 if (ev->link_type != SCO_LINK)
1685 goto unlock;
1686
1687 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1688 if (!conn)
1689 goto unlock;
1690
1691 conn->type = SCO_LINK;
1692 }
1693
1694 if (!ev->status) {
1695 conn->handle = __le16_to_cpu(ev->handle);
1696
1697 if (conn->type == ACL_LINK) {
1698 conn->state = BT_CONFIG;
1699 hci_conn_hold(conn);
1700
1701 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1702 !hci_find_link_key(hdev, &ev->bdaddr))
1703 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1704 else
1705 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1706 } else
1707 conn->state = BT_CONNECTED;
1708
1709 hci_conn_hold_device(conn);
1710 hci_conn_add_sysfs(conn);
1711
1712 if (test_bit(HCI_AUTH, &hdev->flags))
1713 conn->link_mode |= HCI_LM_AUTH;
1714
1715 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1716 conn->link_mode |= HCI_LM_ENCRYPT;
1717
1718 /* Get remote features */
1719 if (conn->type == ACL_LINK) {
1720 struct hci_cp_read_remote_features cp;
1721 cp.handle = ev->handle;
1722 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1723 sizeof(cp), &cp);
1724 }
1725
1726 /* Set packet type for incoming connection */
1727 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1728 struct hci_cp_change_conn_ptype cp;
1729 cp.handle = ev->handle;
1730 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1731 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1732 &cp);
1733 }
1734 } else {
1735 conn->state = BT_CLOSED;
1736 if (conn->type == ACL_LINK)
1737 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1738 conn->dst_type, ev->status);
1739 }
1740
1741 if (conn->type == ACL_LINK)
1742 hci_sco_setup(conn, ev->status);
1743
1744 if (ev->status) {
1745 hci_proto_connect_cfm(conn, ev->status);
1746 hci_conn_del(conn);
1747 } else if (ev->link_type != ACL_LINK)
1748 hci_proto_connect_cfm(conn, ev->status);
1749
1750 unlock:
1751 hci_dev_unlock(hdev);
1752
1753 hci_conn_check_pending(hdev);
1754 }
1755
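/* Accept a previously deferred incoming connection. Controllers
 * without eSCO support get a plain Accept Connection Request; eSCO
 * capable controllers use Accept Synchronous Connection Request with
 * default bandwidth and latency parameters.
 */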
1756 void hci_conn_accept(struct hci_conn *conn, int mask)
1757 {
1758 struct hci_dev *hdev = conn->hdev;
1759
1760 BT_DBG("conn %p", conn);
1761
1762 conn->state = BT_CONFIG;
1763
1764 if (!lmp_esco_capable(hdev)) {
1765 struct hci_cp_accept_conn_req cp;
1766
1767 bacpy(&cp.bdaddr, &conn->dst);
1768
1769 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1770 cp.role = 0x00; /* Become master */
1771 else
1772 cp.role = 0x01; /* Remain slave */
1773
1774 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
1775 } else /* lmp_esco_capable(hdev)) */ {
1776 struct hci_cp_accept_sync_conn_req cp;
1777
1778 bacpy(&cp.bdaddr, &conn->dst);
1779 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1780
1781 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1782 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1783 cp.max_latency = __constant_cpu_to_le16(0xffff);
1784 cp.content_format = cpu_to_le16(hdev->voice_setting);
1785 cp.retrans_effort = 0xff;
1786
1787 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1788 sizeof(cp), &cp);
1789 }
1790 }
1791
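/* Connection Request event: consult the link mode and the registered
 * protocols, reject blacklisted devices and otherwise accept the
 * connection (or defer it to the protocol), choosing the role from
 * the HCI_LM_MASTER policy.
 */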
1792 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1793 {
1794 struct hci_ev_conn_request *ev = (void *) skb->data;
1795 int mask = hdev->link_mode;
1796 __u8 flags = 0;
1797
1798 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1799 ev->link_type);
1800
1801 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1802 &flags);
1803
1804 if ((mask & HCI_LM_ACCEPT) &&
1805 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1806 /* Connection accepted */
1807 struct inquiry_entry *ie;
1808 struct hci_conn *conn;
1809
1810 hci_dev_lock(hdev);
1811
1812 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1813 if (ie)
1814 memcpy(ie->data.dev_class, ev->dev_class, 3);
1815
1816 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1817 &ev->bdaddr);
1818 if (!conn) {
1819 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1820 if (!conn) {
1821 BT_ERR("No memory for new connection");
1822 hci_dev_unlock(hdev);
1823 return;
1824 }
1825 }
1826
1827 memcpy(conn->dev_class, ev->dev_class, 3);
1828
1829 hci_dev_unlock(hdev);
1830
1831 if (ev->link_type == ACL_LINK ||
1832 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1833 struct hci_cp_accept_conn_req cp;
1834 conn->state = BT_CONNECT;
1835
1836 bacpy(&cp.bdaddr, &ev->bdaddr);
1837
1838 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1839 cp.role = 0x00; /* Become master */
1840 else
1841 cp.role = 0x01; /* Remain slave */
1842
1843 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1844 &cp);
1845 } else if (!(flags & HCI_PROTO_DEFER)) {
1846 struct hci_cp_accept_sync_conn_req cp;
1847 conn->state = BT_CONNECT;
1848
1849 bacpy(&cp.bdaddr, &ev->bdaddr);
1850 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1851
1852 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1853 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1854 cp.max_latency = __constant_cpu_to_le16(0xffff);
1855 cp.content_format = cpu_to_le16(hdev->voice_setting);
1856 cp.retrans_effort = 0xff;
1857
1858 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1859 sizeof(cp), &cp);
1860 } else {
1861 conn->state = BT_CONNECT2;
1862 hci_proto_connect_cfm(conn, 0);
1863 hci_conn_put(conn);
1864 }
1865 } else {
1866 /* Connection rejected */
1867 struct hci_cp_reject_conn_req cp;
1868
1869 bacpy(&cp.bdaddr, &ev->bdaddr);
1870 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1871 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1872 }
1873 }
1874
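/* Map an HCI disconnect reason code to the MGMT_DEV_DISCONN_* value
 * reported to userspace.
 */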
1875 static u8 hci_to_mgmt_reason(u8 err)
1876 {
1877 switch (err) {
1878 case HCI_ERROR_CONNECTION_TIMEOUT:
1879 return MGMT_DEV_DISCONN_TIMEOUT;
1880 case HCI_ERROR_REMOTE_USER_TERM:
1881 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1882 case HCI_ERROR_REMOTE_POWER_OFF:
1883 return MGMT_DEV_DISCONN_REMOTE;
1884 case HCI_ERROR_LOCAL_HOST_TERM:
1885 return MGMT_DEV_DISCONN_LOCAL_HOST;
1886 default:
1887 return MGMT_DEV_DISCONN_UNKNOWN;
1888 }
1889 }
1890
1891 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1892 {
1893 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1894 struct hci_conn *conn;
1895
1896 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1897
1898 hci_dev_lock(hdev);
1899
1900 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1901 if (!conn)
1902 goto unlock;
1903
1904 if (ev->status == 0)
1905 conn->state = BT_CLOSED;
1906
1907 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1908 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1909 if (ev->status) {
1910 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1911 conn->dst_type, ev->status);
1912 } else {
1913 u8 reason = hci_to_mgmt_reason(ev->reason);
1914
1915 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1916 conn->dst_type, reason);
1917 }
1918 }
1919
1920 if (ev->status == 0) {
1921 if (conn->type == ACL_LINK && conn->flush_key)
1922 hci_remove_link_key(hdev, &conn->dst);
1923 hci_proto_disconn_cfm(conn, ev->reason);
1924 hci_conn_del(conn);
1925 }
1926
1927 unlock:
1928 hci_dev_unlock(hdev);
1929 }
1930
1931 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1932 {
1933 struct hci_ev_auth_complete *ev = (void *) skb->data;
1934 struct hci_conn *conn;
1935
1936 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1937
1938 hci_dev_lock(hdev);
1939
1940 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1941 if (!conn)
1942 goto unlock;
1943
1944 if (!ev->status) {
1945 if (!hci_conn_ssp_enabled(conn) &&
1946 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1947 BT_INFO("re-auth of legacy device is not possible.");
1948 } else {
1949 conn->link_mode |= HCI_LM_AUTH;
1950 conn->sec_level = conn->pending_sec_level;
1951 }
1952 } else {
1953 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1954 ev->status);
1955 }
1956
1957 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1958 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1959
1960 if (conn->state == BT_CONFIG) {
1961 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1962 struct hci_cp_set_conn_encrypt cp;
1963 cp.handle = ev->handle;
1964 cp.encrypt = 0x01;
1965 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1966 &cp);
1967 } else {
1968 conn->state = BT_CONNECTED;
1969 hci_proto_connect_cfm(conn, ev->status);
1970 hci_conn_put(conn);
1971 }
1972 } else {
1973 hci_auth_cfm(conn, ev->status);
1974
1975 hci_conn_hold(conn);
1976 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1977 hci_conn_put(conn);
1978 }
1979
1980 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1981 if (!ev->status) {
1982 struct hci_cp_set_conn_encrypt cp;
1983 cp.handle = ev->handle;
1984 cp.encrypt = 0x01;
1985 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1986 &cp);
1987 } else {
1988 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1989 hci_encrypt_cfm(conn, ev->status, 0x00);
1990 }
1991 }
1992
1993 unlock:
1994 hci_dev_unlock(hdev);
1995 }
1996
1997 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1998 {
1999 struct hci_ev_remote_name *ev = (void *) skb->data;
2000 struct hci_conn *conn;
2001
2002 BT_DBG("%s", hdev->name);
2003
2004 hci_conn_check_pending(hdev);
2005
2006 hci_dev_lock(hdev);
2007
2008 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2009
2010 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2011 goto check_auth;
2012
2013 if (ev->status == 0)
2014 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2015 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2016 else
2017 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2018
2019 check_auth:
2020 if (!conn)
2021 goto unlock;
2022
2023 if (!hci_outgoing_auth_needed(hdev, conn))
2024 goto unlock;
2025
2026 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2027 struct hci_cp_auth_requested cp;
2028 cp.handle = __cpu_to_le16(conn->handle);
2029 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2030 }
2031
2032 unlock:
2033 hci_dev_unlock(hdev);
2034 }
2035
2036 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2037 {
2038 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2039 struct hci_conn *conn;
2040
2041 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2042
2043 hci_dev_lock(hdev);
2044
2045 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2046 if (conn) {
2047 if (!ev->status) {
2048 if (ev->encrypt) {
2049 /* Encryption implies authentication */
2050 conn->link_mode |= HCI_LM_AUTH;
2051 conn->link_mode |= HCI_LM_ENCRYPT;
2052 conn->sec_level = conn->pending_sec_level;
2053 } else
2054 conn->link_mode &= ~HCI_LM_ENCRYPT;
2055 }
2056
2057 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2058
2059 if (ev->status && conn->state == BT_CONNECTED) {
2060 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2061 hci_conn_put(conn);
2062 goto unlock;
2063 }
2064
2065 if (conn->state == BT_CONFIG) {
2066 if (!ev->status)
2067 conn->state = BT_CONNECTED;
2068
2069 hci_proto_connect_cfm(conn, ev->status);
2070 hci_conn_put(conn);
2071 } else
2072 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2073 }
2074
2075 unlock:
2076 hci_dev_unlock(hdev);
2077 }
2078
2079 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2080 struct sk_buff *skb)
2081 {
2082 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2083 struct hci_conn *conn;
2084
2085 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2086
2087 hci_dev_lock(hdev);
2088
2089 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2090 if (conn) {
2091 if (!ev->status)
2092 conn->link_mode |= HCI_LM_SECURE;
2093
2094 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2095
2096 hci_key_change_cfm(conn, ev->status);
2097 }
2098
2099 hci_dev_unlock(hdev);
2100 }
2101
2102 static void hci_remote_features_evt(struct hci_dev *hdev,
2103 struct sk_buff *skb)
2104 {
2105 struct hci_ev_remote_features *ev = (void *) skb->data;
2106 struct hci_conn *conn;
2107
2108 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2109
2110 hci_dev_lock(hdev);
2111
2112 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2113 if (!conn)
2114 goto unlock;
2115
2116 if (!ev->status)
2117 memcpy(conn->features, ev->features, 8);
2118
2119 if (conn->state != BT_CONFIG)
2120 goto unlock;
2121
2122 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2123 struct hci_cp_read_remote_ext_features cp;
2124 cp.handle = ev->handle;
2125 cp.page = 0x01;
2126 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2127 sizeof(cp), &cp);
2128 goto unlock;
2129 }
2130
2131 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2132 struct hci_cp_remote_name_req cp;
2133 memset(&cp, 0, sizeof(cp));
2134 bacpy(&cp.bdaddr, &conn->dst);
2135 cp.pscan_rep_mode = 0x02;
2136 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2137 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2138 mgmt_device_connected(hdev, &conn->dst, conn->type,
2139 conn->dst_type, 0, NULL, 0,
2140 conn->dev_class);
2141
2142 if (!hci_outgoing_auth_needed(hdev, conn)) {
2143 conn->state = BT_CONNECTED;
2144 hci_proto_connect_cfm(conn, ev->status);
2145 hci_conn_put(conn);
2146 }
2147
2148 unlock:
2149 hci_dev_unlock(hdev);
2150 }
2151
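/* Command Complete event: the byte right after the event header is the
 * first return parameter, which for most commands is the status. The event
 * is demultiplexed by opcode to the matching hci_cc_*() handler, the
 * command timer is cancelled for anything but HCI_OP_NOP, the pending
 * request is completed through hci_req_cmd_complete() and, if the
 * controller reports free command slots (ncmd) and no reset is in
 * progress, the next queued command is scheduled.
 */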
2152 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2153 {
2154 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2155 u8 status = skb->data[sizeof(*ev)];
2156 __u16 opcode;
2157
2158 skb_pull(skb, sizeof(*ev));
2159
2160 opcode = __le16_to_cpu(ev->opcode);
2161
2162 switch (opcode) {
2163 case HCI_OP_INQUIRY_CANCEL:
2164 hci_cc_inquiry_cancel(hdev, skb);
2165 break;
2166
2167 case HCI_OP_PERIODIC_INQ:
2168 hci_cc_periodic_inq(hdev, skb);
2169 break;
2170
2171 case HCI_OP_EXIT_PERIODIC_INQ:
2172 hci_cc_exit_periodic_inq(hdev, skb);
2173 break;
2174
2175 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2176 hci_cc_remote_name_req_cancel(hdev, skb);
2177 break;
2178
2179 case HCI_OP_ROLE_DISCOVERY:
2180 hci_cc_role_discovery(hdev, skb);
2181 break;
2182
2183 case HCI_OP_READ_LINK_POLICY:
2184 hci_cc_read_link_policy(hdev, skb);
2185 break;
2186
2187 case HCI_OP_WRITE_LINK_POLICY:
2188 hci_cc_write_link_policy(hdev, skb);
2189 break;
2190
2191 case HCI_OP_READ_DEF_LINK_POLICY:
2192 hci_cc_read_def_link_policy(hdev, skb);
2193 break;
2194
2195 case HCI_OP_WRITE_DEF_LINK_POLICY:
2196 hci_cc_write_def_link_policy(hdev, skb);
2197 break;
2198
2199 case HCI_OP_RESET:
2200 hci_cc_reset(hdev, skb);
2201 break;
2202
2203 case HCI_OP_WRITE_LOCAL_NAME:
2204 hci_cc_write_local_name(hdev, skb);
2205 break;
2206
2207 case HCI_OP_READ_LOCAL_NAME:
2208 hci_cc_read_local_name(hdev, skb);
2209 break;
2210
2211 case HCI_OP_WRITE_AUTH_ENABLE:
2212 hci_cc_write_auth_enable(hdev, skb);
2213 break;
2214
2215 case HCI_OP_WRITE_ENCRYPT_MODE:
2216 hci_cc_write_encrypt_mode(hdev, skb);
2217 break;
2218
2219 case HCI_OP_WRITE_SCAN_ENABLE:
2220 hci_cc_write_scan_enable(hdev, skb);
2221 break;
2222
2223 case HCI_OP_READ_CLASS_OF_DEV:
2224 hci_cc_read_class_of_dev(hdev, skb);
2225 break;
2226
2227 case HCI_OP_WRITE_CLASS_OF_DEV:
2228 hci_cc_write_class_of_dev(hdev, skb);
2229 break;
2230
2231 case HCI_OP_READ_VOICE_SETTING:
2232 hci_cc_read_voice_setting(hdev, skb);
2233 break;
2234
2235 case HCI_OP_WRITE_VOICE_SETTING:
2236 hci_cc_write_voice_setting(hdev, skb);
2237 break;
2238
2239 case HCI_OP_WRITE_SSP_MODE:
2240 hci_cc_write_ssp_mode(hdev, skb);
2241 break;
2242
2243 case HCI_OP_READ_LOCAL_VERSION:
2244 hci_cc_read_local_version(hdev, skb);
2245 break;
2246
2247 case HCI_OP_READ_LOCAL_COMMANDS:
2248 hci_cc_read_local_commands(hdev, skb);
2249 break;
2250
2251 case HCI_OP_READ_LOCAL_FEATURES:
2252 hci_cc_read_local_features(hdev, skb);
2253 break;
2254
2255 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2256 hci_cc_read_local_ext_features(hdev, skb);
2257 break;
2258
2259 case HCI_OP_READ_BUFFER_SIZE:
2260 hci_cc_read_buffer_size(hdev, skb);
2261 break;
2262
2263 case HCI_OP_READ_BD_ADDR:
2264 hci_cc_read_bd_addr(hdev, skb);
2265 break;
2266
2267 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2268 hci_cc_read_page_scan_activity(hdev, skb);
2269 break;
2270
2271 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2272 hci_cc_write_page_scan_activity(hdev, skb);
2273 break;
2274
2275 case HCI_OP_READ_PAGE_SCAN_TYPE:
2276 hci_cc_read_page_scan_type(hdev, skb);
2277 break;
2278
2279 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2280 hci_cc_write_page_scan_type(hdev, skb);
2281 break;
2282
2283 case HCI_OP_READ_DATA_BLOCK_SIZE:
2284 hci_cc_read_data_block_size(hdev, skb);
2285 break;
2286
2287 case HCI_OP_READ_FLOW_CONTROL_MODE:
2288 hci_cc_read_flow_control_mode(hdev, skb);
2289 break;
2290
2291 case HCI_OP_READ_LOCAL_AMP_INFO:
2292 hci_cc_read_local_amp_info(hdev, skb);
2293 break;
2294
2295 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2296 hci_cc_read_local_amp_assoc(hdev, skb);
2297 break;
2298
2299 case HCI_OP_READ_INQ_RSP_TX_POWER:
2300 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2301 break;
2302
2303 case HCI_OP_PIN_CODE_REPLY:
2304 hci_cc_pin_code_reply(hdev, skb);
2305 break;
2306
2307 case HCI_OP_PIN_CODE_NEG_REPLY:
2308 hci_cc_pin_code_neg_reply(hdev, skb);
2309 break;
2310
2311 case HCI_OP_READ_LOCAL_OOB_DATA:
2312 hci_cc_read_local_oob_data_reply(hdev, skb);
2313 break;
2314
2315 case HCI_OP_LE_READ_BUFFER_SIZE:
2316 hci_cc_le_read_buffer_size(hdev, skb);
2317 break;
2318
2319 case HCI_OP_LE_READ_LOCAL_FEATURES:
2320 hci_cc_le_read_local_features(hdev, skb);
2321 break;
2322
2323 case HCI_OP_LE_READ_ADV_TX_POWER:
2324 hci_cc_le_read_adv_tx_power(hdev, skb);
2325 break;
2326
2327 case HCI_OP_USER_CONFIRM_REPLY:
2328 hci_cc_user_confirm_reply(hdev, skb);
2329 break;
2330
2331 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2332 hci_cc_user_confirm_neg_reply(hdev, skb);
2333 break;
2334
2335 case HCI_OP_USER_PASSKEY_REPLY:
2336 hci_cc_user_passkey_reply(hdev, skb);
2337 break;
2338
2339 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2340 hci_cc_user_passkey_neg_reply(hdev, skb);
2341 break;
2342
2343 case HCI_OP_LE_SET_SCAN_PARAM:
2344 hci_cc_le_set_scan_param(hdev, skb);
2345 break;
2346
2347 case HCI_OP_LE_SET_ADV_ENABLE:
2348 hci_cc_le_set_adv_enable(hdev, skb);
2349 break;
2350
2351 case HCI_OP_LE_SET_SCAN_ENABLE:
2352 hci_cc_le_set_scan_enable(hdev, skb);
2353 break;
2354
2355 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2356 hci_cc_le_read_white_list_size(hdev, skb);
2357 break;
2358
2359 case HCI_OP_LE_READ_SUPPORTED_STATES:
2360 hci_cc_le_read_supported_states(hdev, skb);
2361 break;
2362
2363 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2364 hci_cc_write_le_host_supported(hdev, skb);
2365 break;
2366
2367 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2368 hci_cc_write_remote_amp_assoc(hdev, skb);
2369 break;
2370
2371 default:
2372 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2373 break;
2374 }
2375
2376 if (opcode != HCI_OP_NOP)
2377 del_timer(&hdev->cmd_timer);
2378
2379 hci_req_cmd_complete(hdev, opcode, status);
2380
2381 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2382 atomic_set(&hdev->cmd_cnt, 1);
2383 if (!skb_queue_empty(&hdev->cmd_q))
2384 queue_work(hdev->workqueue, &hdev->cmd_work);
2385 }
2386 }
2387
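/* Command Status event: same bookkeeping as Command Complete, but the
 * hci_cs_*() handlers only receive the status since no return parameters
 * follow.
 */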
2388 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2389 {
2390 struct hci_ev_cmd_status *ev = (void *) skb->data;
2391 __u16 opcode;
2392
2393 skb_pull(skb, sizeof(*ev));
2394
2395 opcode = __le16_to_cpu(ev->opcode);
2396
2397 switch (opcode) {
2398 case HCI_OP_INQUIRY:
2399 hci_cs_inquiry(hdev, ev->status);
2400 break;
2401
2402 case HCI_OP_CREATE_CONN:
2403 hci_cs_create_conn(hdev, ev->status);
2404 break;
2405
2406 case HCI_OP_ADD_SCO:
2407 hci_cs_add_sco(hdev, ev->status);
2408 break;
2409
2410 case HCI_OP_AUTH_REQUESTED:
2411 hci_cs_auth_requested(hdev, ev->status);
2412 break;
2413
2414 case HCI_OP_SET_CONN_ENCRYPT:
2415 hci_cs_set_conn_encrypt(hdev, ev->status);
2416 break;
2417
2418 case HCI_OP_REMOTE_NAME_REQ:
2419 hci_cs_remote_name_req(hdev, ev->status);
2420 break;
2421
2422 case HCI_OP_READ_REMOTE_FEATURES:
2423 hci_cs_read_remote_features(hdev, ev->status);
2424 break;
2425
2426 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2427 hci_cs_read_remote_ext_features(hdev, ev->status);
2428 break;
2429
2430 case HCI_OP_SETUP_SYNC_CONN:
2431 hci_cs_setup_sync_conn(hdev, ev->status);
2432 break;
2433
2434 case HCI_OP_SNIFF_MODE:
2435 hci_cs_sniff_mode(hdev, ev->status);
2436 break;
2437
2438 case HCI_OP_EXIT_SNIFF_MODE:
2439 hci_cs_exit_sniff_mode(hdev, ev->status);
2440 break;
2441
2442 case HCI_OP_DISCONNECT:
2443 hci_cs_disconnect(hdev, ev->status);
2444 break;
2445
2446 case HCI_OP_LE_CREATE_CONN:
2447 hci_cs_le_create_conn(hdev, ev->status);
2448 break;
2449
2450 case HCI_OP_CREATE_PHY_LINK:
2451 hci_cs_create_phylink(hdev, ev->status);
2452 break;
2453
2454 case HCI_OP_ACCEPT_PHY_LINK:
2455 hci_cs_accept_phylink(hdev, ev->status);
2456 break;
2457
2458 default:
2459 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2460 break;
2461 }
2462
2463 if (opcode != HCI_OP_NOP)
2464 del_timer(&hdev->cmd_timer);
2465
2466 hci_req_cmd_complete(hdev, opcode, ev->status);
2467
2468 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2469 atomic_set(&hdev->cmd_cnt, 1);
2470 if (!skb_queue_empty(&hdev->cmd_q))
2471 queue_work(hdev->workqueue, &hdev->cmd_work);
2472 }
2473 }
2474
2475 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2476 {
2477 struct hci_ev_role_change *ev = (void *) skb->data;
2478 struct hci_conn *conn;
2479
2480 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2481
2482 hci_dev_lock(hdev);
2483
2484 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2485 if (conn) {
2486 if (!ev->status) {
2487 if (ev->role)
2488 conn->link_mode &= ~HCI_LM_MASTER;
2489 else
2490 conn->link_mode |= HCI_LM_MASTER;
2491 }
2492
2493 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2494
2495 hci_role_switch_cfm(conn, ev->status, ev->role);
2496 }
2497
2498 hci_dev_unlock(hdev);
2499 }
2500
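/* Number of Completed Packets event (packet-based flow control only):
 * for each reported handle the completed count is subtracted from
 * conn->sent and credited back to the matching quota (ACL, SCO, or LE,
 * falling back to the ACL pool when the controller has no dedicated LE
 * buffers), capped at the advertised pool size, before the TX work is
 * rescheduled.
 */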
2501 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2502 {
2503 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2504 int i;
2505
2506 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2507 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2508 return;
2509 }
2510
2511 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2512 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2513 BT_DBG("%s bad parameters", hdev->name);
2514 return;
2515 }
2516
2517 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2518
2519 for (i = 0; i < ev->num_hndl; i++) {
2520 struct hci_comp_pkts_info *info = &ev->handles[i];
2521 struct hci_conn *conn;
2522 __u16 handle, count;
2523
2524 handle = __le16_to_cpu(info->handle);
2525 count = __le16_to_cpu(info->count);
2526
2527 conn = hci_conn_hash_lookup_handle(hdev, handle);
2528 if (!conn)
2529 continue;
2530
2531 conn->sent -= count;
2532
2533 switch (conn->type) {
2534 case ACL_LINK:
2535 hdev->acl_cnt += count;
2536 if (hdev->acl_cnt > hdev->acl_pkts)
2537 hdev->acl_cnt = hdev->acl_pkts;
2538 break;
2539
2540 case LE_LINK:
2541 if (hdev->le_pkts) {
2542 hdev->le_cnt += count;
2543 if (hdev->le_cnt > hdev->le_pkts)
2544 hdev->le_cnt = hdev->le_pkts;
2545 } else {
2546 hdev->acl_cnt += count;
2547 if (hdev->acl_cnt > hdev->acl_pkts)
2548 hdev->acl_cnt = hdev->acl_pkts;
2549 }
2550 break;
2551
2552 case SCO_LINK:
2553 hdev->sco_cnt += count;
2554 if (hdev->sco_cnt > hdev->sco_pkts)
2555 hdev->sco_cnt = hdev->sco_pkts;
2556 break;
2557
2558 default:
2559 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2560 break;
2561 }
2562 }
2563
2564 queue_work(hdev->workqueue, &hdev->tx_work);
2565 }
2566
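/* Resolve a handle for completed-blocks accounting: on a BR/EDR
 * controller the handle names an ACL connection, on an AMP controller it
 * names a logical channel whose parent connection is returned instead.
 */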
2567 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2568 __u16 handle)
2569 {
2570 struct hci_chan *chan;
2571
2572 switch (hdev->dev_type) {
2573 case HCI_BREDR:
2574 return hci_conn_hash_lookup_handle(hdev, handle);
2575 case HCI_AMP:
2576 chan = hci_chan_lookup_handle(hdev, handle);
2577 if (chan)
2578 return chan->conn;
2579 break;
2580 default:
2581 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2582 break;
2583 }
2584
2585 return NULL;
2586 }
2587
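/* Number of Completed Data Blocks event (block-based flow control only):
 * freed blocks are credited back to hdev->block_cnt, capped at
 * hdev->num_blocks, and the TX work is rescheduled.
 */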
2588 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2589 {
2590 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2591 int i;
2592
2593 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2594 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2595 return;
2596 }
2597
2598 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2599 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2600 BT_DBG("%s bad parameters", hdev->name);
2601 return;
2602 }
2603
2604 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2605 ev->num_hndl);
2606
2607 for (i = 0; i < ev->num_hndl; i++) {
2608 struct hci_comp_blocks_info *info = &ev->handles[i];
2609 struct hci_conn *conn = NULL;
2610 __u16 handle, block_count;
2611
2612 handle = __le16_to_cpu(info->handle);
2613 block_count = __le16_to_cpu(info->blocks);
2614
2615 conn = __hci_conn_lookup_handle(hdev, handle);
2616 if (!conn)
2617 continue;
2618
2619 conn->sent -= block_count;
2620
2621 switch (conn->type) {
2622 case ACL_LINK:
2623 case AMP_LINK:
2624 hdev->block_cnt += block_count;
2625 if (hdev->block_cnt > hdev->num_blocks)
2626 hdev->block_cnt = hdev->num_blocks;
2627 break;
2628
2629 default:
2630 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2631 break;
2632 }
2633 }
2634
2635 queue_work(hdev->workqueue, &hdev->tx_work);
2636 }
2637
2638 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2639 {
2640 struct hci_ev_mode_change *ev = (void *) skb->data;
2641 struct hci_conn *conn;
2642
2643 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2644
2645 hci_dev_lock(hdev);
2646
2647 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2648 if (conn) {
2649 conn->mode = ev->mode;
2650 conn->interval = __le16_to_cpu(ev->interval);
2651
2652 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2653 &conn->flags)) {
2654 if (conn->mode == HCI_CM_ACTIVE)
2655 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2656 else
2657 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2658 }
2659
2660 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2661 hci_sco_setup(conn, ev->status);
2662 }
2663
2664 hci_dev_unlock(hdev);
2665 }
2666
2667 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2668 {
2669 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2670 struct hci_conn *conn;
2671
2672 BT_DBG("%s", hdev->name);
2673
2674 hci_dev_lock(hdev);
2675
2676 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2677 if (!conn)
2678 goto unlock;
2679
2680 if (conn->state == BT_CONNECTED) {
2681 hci_conn_hold(conn);
2682 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2683 hci_conn_put(conn);
2684 }
2685
2686 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2687 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2688 sizeof(ev->bdaddr), &ev->bdaddr);
2689 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2690 u8 secure;
2691
2692 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2693 secure = 1;
2694 else
2695 secure = 0;
2696
2697 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2698 }
2699
2700 unlock:
2701 hci_dev_unlock(hdev);
2702 }
2703
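/* Link Key Request event: look up a stored key for the peer and answer
 * with HCI_OP_LINK_KEY_REPLY, or with a negative reply when no usable key
 * exists. Debug keys are only accepted with HCI_DEBUG_KEYS set, and
 * unauthenticated or short combination keys are rejected when the pending
 * security level requires MITM protection or high security.
 */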
2704 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2705 {
2706 struct hci_ev_link_key_req *ev = (void *) skb->data;
2707 struct hci_cp_link_key_reply cp;
2708 struct hci_conn *conn;
2709 struct link_key *key;
2710
2711 BT_DBG("%s", hdev->name);
2712
2713 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2714 return;
2715
2716 hci_dev_lock(hdev);
2717
2718 key = hci_find_link_key(hdev, &ev->bdaddr);
2719 if (!key) {
2720 BT_DBG("%s link key not found for %pMR", hdev->name,
2721 &ev->bdaddr);
2722 goto not_found;
2723 }
2724
2725 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2726 &ev->bdaddr);
2727
2728 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2729 key->type == HCI_LK_DEBUG_COMBINATION) {
2730 BT_DBG("%s ignoring debug key", hdev->name);
2731 goto not_found;
2732 }
2733
2734 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2735 if (conn) {
2736 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2737 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2738 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2739 goto not_found;
2740 }
2741
2742 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2743 conn->pending_sec_level == BT_SECURITY_HIGH) {
2744 BT_DBG("%s ignoring key unauthenticated for high security",
2745 hdev->name);
2746 goto not_found;
2747 }
2748
2749 conn->key_type = key->type;
2750 conn->pin_length = key->pin_len;
2751 }
2752
2753 bacpy(&cp.bdaddr, &ev->bdaddr);
2754 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2755
2756 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2757
2758 hci_dev_unlock(hdev);
2759
2760 return;
2761
2762 not_found:
2763 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2764 hci_dev_unlock(hdev);
2765 }
2766
2767 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2768 {
2769 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2770 struct hci_conn *conn;
2771 u8 pin_len = 0;
2772
2773 BT_DBG("%s", hdev->name);
2774
2775 hci_dev_lock(hdev);
2776
2777 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2778 if (conn) {
2779 hci_conn_hold(conn);
2780 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2781 pin_len = conn->pin_length;
2782
2783 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2784 conn->key_type = ev->key_type;
2785
2786 hci_conn_put(conn);
2787 }
2788
2789 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2790 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2791 ev->key_type, pin_len);
2792
2793 hci_dev_unlock(hdev);
2794 }
2795
2796 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2797 {
2798 struct hci_ev_clock_offset *ev = (void *) skb->data;
2799 struct hci_conn *conn;
2800
2801 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2802
2803 hci_dev_lock(hdev);
2804
2805 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2806 if (conn && !ev->status) {
2807 struct inquiry_entry *ie;
2808
2809 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2810 if (ie) {
2811 ie->data.clock_offset = ev->clock_offset;
2812 ie->timestamp = jiffies;
2813 }
2814 }
2815
2816 hci_dev_unlock(hdev);
2817 }
2818
2819 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2820 {
2821 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2822 struct hci_conn *conn;
2823
2824 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2825
2826 hci_dev_lock(hdev);
2827
2828 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2829 if (conn && !ev->status)
2830 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2831
2832 hci_dev_unlock(hdev);
2833 }
2834
2835 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2836 {
2837 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2838 struct inquiry_entry *ie;
2839
2840 BT_DBG("%s", hdev->name);
2841
2842 hci_dev_lock(hdev);
2843
2844 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2845 if (ie) {
2846 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2847 ie->timestamp = jiffies;
2848 }
2849
2850 hci_dev_unlock(hdev);
2851 }
2852
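/* Inquiry Result with RSSI event: controllers may use either the plain
 * format or the variant that also carries a page scan mode field; the two
 * are told apart by the per-response size. Each response updates the
 * inquiry cache and is reported through mgmt_device_found().
 */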
2853 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2854 struct sk_buff *skb)
2855 {
2856 struct inquiry_data data;
2857 int num_rsp = *((__u8 *) skb->data);
2858 bool name_known, ssp;
2859
2860 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2861
2862 if (!num_rsp)
2863 return;
2864
2865 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2866 return;
2867
2868 hci_dev_lock(hdev);
2869
2870 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2871 struct inquiry_info_with_rssi_and_pscan_mode *info;
2872 info = (void *) (skb->data + 1);
2873
2874 for (; num_rsp; num_rsp--, info++) {
2875 bacpy(&data.bdaddr, &info->bdaddr);
2876 data.pscan_rep_mode = info->pscan_rep_mode;
2877 data.pscan_period_mode = info->pscan_period_mode;
2878 data.pscan_mode = info->pscan_mode;
2879 memcpy(data.dev_class, info->dev_class, 3);
2880 data.clock_offset = info->clock_offset;
2881 data.rssi = info->rssi;
2882 data.ssp_mode = 0x00;
2883
2884 name_known = hci_inquiry_cache_update(hdev, &data,
2885 false, &ssp);
2886 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2887 info->dev_class, info->rssi,
2888 !name_known, ssp, NULL, 0);
2889 }
2890 } else {
2891 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2892
2893 for (; num_rsp; num_rsp--, info++) {
2894 bacpy(&data.bdaddr, &info->bdaddr);
2895 data.pscan_rep_mode = info->pscan_rep_mode;
2896 data.pscan_period_mode = info->pscan_period_mode;
2897 data.pscan_mode = 0x00;
2898 memcpy(data.dev_class, info->dev_class, 3);
2899 data.clock_offset = info->clock_offset;
2900 data.rssi = info->rssi;
2901 data.ssp_mode = 0x00;
2902 name_known = hci_inquiry_cache_update(hdev, &data,
2903 false, &ssp);
2904 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2905 info->dev_class, info->rssi,
2906 !name_known, ssp, NULL, 0);
2907 }
2908 }
2909
2910 hci_dev_unlock(hdev);
2911 }
2912
2913 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2914 struct sk_buff *skb)
2915 {
2916 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2917 struct hci_conn *conn;
2918
2919 BT_DBG("%s", hdev->name);
2920
2921 hci_dev_lock(hdev);
2922
2923 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2924 if (!conn)
2925 goto unlock;
2926
2927 if (!ev->status && ev->page == 0x01) {
2928 struct inquiry_entry *ie;
2929
2930 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2931 if (ie)
2932 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2933
2934 if (ev->features[0] & LMP_HOST_SSP)
2935 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2936 }
2937
2938 if (conn->state != BT_CONFIG)
2939 goto unlock;
2940
2941 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2942 struct hci_cp_remote_name_req cp;
2943 memset(&cp, 0, sizeof(cp));
2944 bacpy(&cp.bdaddr, &conn->dst);
2945 cp.pscan_rep_mode = 0x02;
2946 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2947 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2948 mgmt_device_connected(hdev, &conn->dst, conn->type,
2949 conn->dst_type, 0, NULL, 0,
2950 conn->dev_class);
2951
2952 if (!hci_outgoing_auth_needed(hdev, conn)) {
2953 conn->state = BT_CONNECTED;
2954 hci_proto_connect_cfm(conn, ev->status);
2955 hci_conn_put(conn);
2956 }
2957
2958 unlock:
2959 hci_dev_unlock(hdev);
2960 }
2961
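/* Synchronous Connection Complete event: an incoming eSCO setup may
 * complete as plain SCO, so fall back to the eSCO lookup and retype the
 * connection. For a few setup failures (unsupported feature, rejected SCO
 * interval, unspecified error) an outgoing attempt is retried once via
 * hci_setup_sync() with a recomputed packet type before giving up.
 */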
2962 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2963 struct sk_buff *skb)
2964 {
2965 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2966 struct hci_conn *conn;
2967
2968 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2969
2970 hci_dev_lock(hdev);
2971
2972 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2973 if (!conn) {
2974 if (ev->link_type == ESCO_LINK)
2975 goto unlock;
2976
2977 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2978 if (!conn)
2979 goto unlock;
2980
2981 conn->type = SCO_LINK;
2982 }
2983
2984 switch (ev->status) {
2985 case 0x00:
2986 conn->handle = __le16_to_cpu(ev->handle);
2987 conn->state = BT_CONNECTED;
2988
2989 hci_conn_hold_device(conn);
2990 hci_conn_add_sysfs(conn);
2991 break;
2992
2993 case 0x11: /* Unsupported Feature or Parameter Value */
2994 case 0x1c: /* SCO interval rejected */
2995 case 0x1a: /* Unsupported Remote Feature */
2996 case 0x1f: /* Unspecified error */
2997 if (conn->out && conn->attempt < 2) {
2998 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2999 (hdev->esco_type & EDR_ESCO_MASK);
3000 hci_setup_sync(conn, conn->link->handle);
3001 goto unlock;
3002 }
3003 /* fall through */
3004
3005 default:
3006 conn->state = BT_CLOSED;
3007 break;
3008 }
3009
3010 hci_proto_connect_cfm(conn, ev->status);
3011 if (ev->status)
3012 hci_conn_del(conn);
3013
3014 unlock:
3015 hci_dev_unlock(hdev);
3016 }
3017
3018 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3019 struct sk_buff *skb)
3020 {
3021 struct inquiry_data data;
3022 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3023 int num_rsp = *((__u8 *) skb->data);
3024 size_t eir_len;
3025
3026 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3027
3028 if (!num_rsp)
3029 return;
3030
3031 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3032 return;
3033
3034 hci_dev_lock(hdev);
3035
3036 for (; num_rsp; num_rsp--, info++) {
3037 bool name_known, ssp;
3038
3039 bacpy(&data.bdaddr, &info->bdaddr);
3040 data.pscan_rep_mode = info->pscan_rep_mode;
3041 data.pscan_period_mode = info->pscan_period_mode;
3042 data.pscan_mode = 0x00;
3043 memcpy(data.dev_class, info->dev_class, 3);
3044 data.clock_offset = info->clock_offset;
3045 data.rssi = info->rssi;
3046 data.ssp_mode = 0x01;
3047
3048 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3049 name_known = eir_has_data_type(info->data,
3050 sizeof(info->data),
3051 EIR_NAME_COMPLETE);
3052 else
3053 name_known = true;
3054
3055 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3056 &ssp);
3057 eir_len = eir_get_length(info->data, sizeof(info->data));
3058 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3059 info->dev_class, info->rssi, !name_known,
3060 ssp, info->data, eir_len);
3061 }
3062
3063 hci_dev_unlock(hdev);
3064 }
3065
3066 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3067 struct sk_buff *skb)
3068 {
3069 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3070 struct hci_conn *conn;
3071
3072 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3073 __le16_to_cpu(ev->handle));
3074
3075 hci_dev_lock(hdev);
3076
3077 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3078 if (!conn)
3079 goto unlock;
3080
3081 if (!ev->status)
3082 conn->sec_level = conn->pending_sec_level;
3083
3084 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3085
3086 if (ev->status && conn->state == BT_CONNECTED) {
3087 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3088 hci_conn_put(conn);
3089 goto unlock;
3090 }
3091
3092 if (conn->state == BT_CONFIG) {
3093 if (!ev->status)
3094 conn->state = BT_CONNECTED;
3095
3096 hci_proto_connect_cfm(conn, ev->status);
3097 hci_conn_put(conn);
3098 } else {
3099 hci_auth_cfm(conn, ev->status);
3100
3101 hci_conn_hold(conn);
3102 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3103 hci_conn_put(conn);
3104 }
3105
3106 unlock:
3107 hci_dev_unlock(hdev);
3108 }
3109
3110 static u8 hci_get_auth_req(struct hci_conn *conn)
3111 {
3112 /* If the remote side requests dedicated bonding, follow that lead */
3113 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3114 /* If both remote and local IO capabilities allow MITM
3115 * protection, require it; otherwise don't. */
3116 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3117 return 0x02;
3118 else
3119 return 0x03;
3120 }
3121
3122 /* If the remote side requests no-bonding, follow that lead */
3123 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3124 return conn->remote_auth | (conn->auth_type & 0x01);
3125
3126 return conn->auth_type;
3127 }
3128
3129 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3130 {
3131 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3132 struct hci_conn *conn;
3133
3134 BT_DBG("%s", hdev->name);
3135
3136 hci_dev_lock(hdev);
3137
3138 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3139 if (!conn)
3140 goto unlock;
3141
3142 hci_conn_hold(conn);
3143
3144 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3145 goto unlock;
3146
3147 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3148 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3149 struct hci_cp_io_capability_reply cp;
3150
3151 bacpy(&cp.bdaddr, &ev->bdaddr);
3152 /* Change the IO capability from KeyboardDisplay
3153 * to DisplayYesNo, as it is not supported by the BT spec. */
3154 cp.capability = (conn->io_capability == 0x04) ?
3155 0x01 : conn->io_capability;
3156 conn->auth_type = hci_get_auth_req(conn);
3157 cp.authentication = conn->auth_type;
3158
3159 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3160 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3161 cp.oob_data = 0x01;
3162 else
3163 cp.oob_data = 0x00;
3164
3165 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3166 sizeof(cp), &cp);
3167 } else {
3168 struct hci_cp_io_capability_neg_reply cp;
3169
3170 bacpy(&cp.bdaddr, &ev->bdaddr);
3171 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3172
3173 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3174 sizeof(cp), &cp);
3175 }
3176
3177 unlock:
3178 hci_dev_unlock(hdev);
3179 }
3180
3181 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3182 {
3183 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3184 struct hci_conn *conn;
3185
3186 BT_DBG("%s", hdev->name);
3187
3188 hci_dev_lock(hdev);
3189
3190 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3191 if (!conn)
3192 goto unlock;
3193
3194 conn->remote_cap = ev->capability;
3195 conn->remote_auth = ev->authentication;
3196 if (ev->oob_data)
3197 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3198
3199 unlock:
3200 hci_dev_unlock(hdev);
3201 }
3202
3203 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3204 struct sk_buff *skb)
3205 {
3206 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3207 int loc_mitm, rem_mitm, confirm_hint = 0;
3208 struct hci_conn *conn;
3209
3210 BT_DBG("%s", hdev->name);
3211
3212 hci_dev_lock(hdev);
3213
3214 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3215 goto unlock;
3216
3217 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3218 if (!conn)
3219 goto unlock;
3220
3221 loc_mitm = (conn->auth_type & 0x01);
3222 rem_mitm = (conn->remote_auth & 0x01);
3223
3224 /* If we require MITM but the remote device can't provide that
3225 * (it has NoInputNoOutput) then reject the confirmation
3226 * request. The only exception is when we're dedicated bonding
3227 * initiators (connect_cfm_cb set) since then we always have the MITM
3228 * bit set. */
3229 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3230 BT_DBG("Rejecting request: remote device can't provide MITM");
3231 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3232 sizeof(ev->bdaddr), &ev->bdaddr);
3233 goto unlock;
3234 }
3235
3236 /* If neither side requires MITM protection, auto-accept */
3237 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3238 (!rem_mitm || conn->io_capability == 0x03)) {
3239
3240 /* If we're not the initiator, request authorization to
3241 * proceed from user space (mgmt_user_confirm with
3242 * confirm_hint set to 1). */
3243 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3244 BT_DBG("Confirming auto-accept as acceptor");
3245 confirm_hint = 1;
3246 goto confirm;
3247 }
3248
3249 BT_DBG("Auto-accept of user confirmation with %ums delay",
3250 hdev->auto_accept_delay);
3251
3252 if (hdev->auto_accept_delay > 0) {
3253 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3254 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3255 goto unlock;
3256 }
3257
3258 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3259 sizeof(ev->bdaddr), &ev->bdaddr);
3260 goto unlock;
3261 }
3262
3263 confirm:
3264 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3265 confirm_hint);
3266
3267 unlock:
3268 hci_dev_unlock(hdev);
3269 }
3270
3271 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3272 struct sk_buff *skb)
3273 {
3274 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3275
3276 BT_DBG("%s", hdev->name);
3277
3278 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3279 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3280 }
3281
3282 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3283 struct sk_buff *skb)
3284 {
3285 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3286 struct hci_conn *conn;
3287
3288 BT_DBG("%s", hdev->name);
3289
3290 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3291 if (!conn)
3292 return;
3293
3294 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3295 conn->passkey_entered = 0;
3296
3297 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3298 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3299 conn->dst_type, conn->passkey_notify,
3300 conn->passkey_entered);
3301 }
3302
3303 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3304 {
3305 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3306 struct hci_conn *conn;
3307
3308 BT_DBG("%s", hdev->name);
3309
3310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3311 if (!conn)
3312 return;
3313
3314 switch (ev->type) {
3315 case HCI_KEYPRESS_STARTED:
3316 conn->passkey_entered = 0;
3317 return;
3318
3319 case HCI_KEYPRESS_ENTERED:
3320 conn->passkey_entered++;
3321 break;
3322
3323 case HCI_KEYPRESS_ERASED:
3324 conn->passkey_entered--;
3325 break;
3326
3327 case HCI_KEYPRESS_CLEARED:
3328 conn->passkey_entered = 0;
3329 break;
3330
3331 case HCI_KEYPRESS_COMPLETED:
3332 return;
3333 }
3334
3335 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3336 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3337 conn->dst_type, conn->passkey_notify,
3338 conn->passkey_entered);
3339 }
3340
3341 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3342 struct sk_buff *skb)
3343 {
3344 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3345 struct hci_conn *conn;
3346
3347 BT_DBG("%s", hdev->name);
3348
3349 hci_dev_lock(hdev);
3350
3351 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3352 if (!conn)
3353 goto unlock;
3354
3355 /* To avoid duplicate auth_failed events to user space we check
3356 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3357 * initiated the authentication. A traditional auth_complete
3358 * event is always produced as initiator and is also mapped to
3359 * the mgmt_auth_failed event. */
3360 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3361 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3362 ev->status);
3363
3364 hci_conn_put(conn);
3365
3366 unlock:
3367 hci_dev_unlock(hdev);
3368 }
3369
3370 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3371 struct sk_buff *skb)
3372 {
3373 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3374 struct inquiry_entry *ie;
3375
3376 BT_DBG("%s", hdev->name);
3377
3378 hci_dev_lock(hdev);
3379
3380 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3381 if (ie)
3382 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3383
3384 hci_dev_unlock(hdev);
3385 }
3386
3387 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3388 struct sk_buff *skb)
3389 {
3390 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3391 struct oob_data *data;
3392
3393 BT_DBG("%s", hdev->name);
3394
3395 hci_dev_lock(hdev);
3396
3397 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3398 goto unlock;
3399
3400 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3401 if (data) {
3402 struct hci_cp_remote_oob_data_reply cp;
3403
3404 bacpy(&cp.bdaddr, &ev->bdaddr);
3405 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3406 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3407
3408 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3409 &cp);
3410 } else {
3411 struct hci_cp_remote_oob_data_neg_reply cp;
3412
3413 bacpy(&cp.bdaddr, &ev->bdaddr);
3414 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3415 &cp);
3416 }
3417
3418 unlock:
3419 hci_dev_unlock(hdev);
3420 }
3421
3422 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3423 struct sk_buff *skb)
3424 {
3425 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3426 struct hci_conn *hcon, *bredr_hcon;
3427
3428 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3429 ev->status);
3430
3431 hci_dev_lock(hdev);
3432
3433 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3434 if (!hcon) {
3435 hci_dev_unlock(hdev);
3436 return;
3437 }
3438
3439 if (ev->status) {
3440 hci_conn_del(hcon);
3441 hci_dev_unlock(hdev);
3442 return;
3443 }
3444
3445 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3446
3447 hcon->state = BT_CONNECTED;
3448 bacpy(&hcon->dst, &bredr_hcon->dst);
3449
3450 hci_conn_hold(hcon);
3451 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3452 hci_conn_put(hcon);
3453
3454 hci_conn_hold_device(hcon);
3455 hci_conn_add_sysfs(hcon);
3456
3457 amp_physical_cfm(bredr_hcon, hcon);
3458
3459 hci_dev_unlock(hdev);
3460 }
3461
3462 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3463 {
3464 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3465 struct hci_conn *hcon;
3466 struct hci_chan *hchan;
3467 struct amp_mgr *mgr;
3468
3469 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3470 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3471 ev->status);
3472
3473 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3474 if (!hcon)
3475 return;
3476
3477 /* Create AMP hchan */
3478 hchan = hci_chan_create(hcon);
3479 if (!hchan)
3480 return;
3481
3482 hchan->handle = le16_to_cpu(ev->handle);
3483
3484 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3485
3486 mgr = hcon->amp_mgr;
3487 if (mgr && mgr->bredr_chan) {
3488 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3489
3490 l2cap_chan_lock(bredr_chan);
3491
3492 bredr_chan->conn->mtu = hdev->block_mtu;
3493 l2cap_logical_cfm(bredr_chan, hchan, 0);
3494 hci_conn_hold(hcon);
3495
3496 l2cap_chan_unlock(bredr_chan);
3497 }
3498 }
3499
3500 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3501 struct sk_buff *skb)
3502 {
3503 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3504 struct hci_chan *hchan;
3505
3506 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3507 le16_to_cpu(ev->handle), ev->status);
3508
3509 if (ev->status)
3510 return;
3511
3512 hci_dev_lock(hdev);
3513
3514 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3515 if (!hchan)
3516 goto unlock;
3517
3518 amp_destroy_logical_link(hchan, ev->reason);
3519
3520 unlock:
3521 hci_dev_unlock(hdev);
3522 }
3523
3524 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3525 struct sk_buff *skb)
3526 {
3527 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3528 struct hci_conn *hcon;
3529
3530 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3531
3532 if (ev->status)
3533 return;
3534
3535 hci_dev_lock(hdev);
3536
3537 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3538 if (hcon) {
3539 hcon->state = BT_CLOSED;
3540 hci_conn_del(hcon);
3541 }
3542
3543 hci_dev_unlock(hdev);
3544 }
3545
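/* LE Connection Complete event: reuse the hci_conn created when the
 * connection was initiated (LE link in BT_CONNECT state) or allocate one
 * for an incoming connection, record the role and destination address
 * type, then either report the failure or mark the link connected and
 * notify mgmt and the upper protocols.
 */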
3546 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3547 {
3548 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3549 struct hci_conn *conn;
3550
3551 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3552
3553 hci_dev_lock(hdev);
3554
3555 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3556 if (!conn) {
3557 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3558 if (!conn) {
3559 BT_ERR("No memory for new connection");
3560 goto unlock;
3561 }
3562
3563 conn->dst_type = ev->bdaddr_type;
3564
3565 if (ev->role == LE_CONN_ROLE_MASTER) {
3566 conn->out = true;
3567 conn->link_mode |= HCI_LM_MASTER;
3568 }
3569 }
3570
3571 if (ev->status) {
3572 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3573 conn->dst_type, ev->status);
3574 hci_proto_connect_cfm(conn, ev->status);
3575 conn->state = BT_CLOSED;
3576 hci_conn_del(conn);
3577 goto unlock;
3578 }
3579
3580 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3581 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3582 conn->dst_type, 0, NULL, 0, NULL);
3583
3584 conn->sec_level = BT_SECURITY_LOW;
3585 conn->handle = __le16_to_cpu(ev->handle);
3586 conn->state = BT_CONNECTED;
3587
3588 hci_conn_hold_device(conn);
3589 hci_conn_add_sysfs(conn);
3590
3591 hci_proto_connect_cfm(conn, ev->status);
3592
3593 unlock:
3594 hci_dev_unlock(hdev);
3595 }
3596
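/* LE Advertising Report event: every report is followed by a single RSSI
 * byte, which is why the RSSI is read at ev->data[ev->length] and the
 * cursor advances by sizeof(*ev) + length + 1.
 */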
3597 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3598 {
3599 u8 num_reports = skb->data[0];
3600 void *ptr = &skb->data[1];
3601 s8 rssi;
3602
3603 while (num_reports--) {
3604 struct hci_ev_le_advertising_info *ev = ptr;
3605
3606 rssi = ev->data[ev->length];
3607 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3608 NULL, rssi, 0, 1, ev->data, ev->length);
3609
3610 ptr += sizeof(*ev) + ev->length + 1;
3611 }
3612 }
3613
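/* LE Long Term Key Request event: look up the LTK by EDIV/Rand and hand
 * it back with HCI_OP_LE_LTK_REPLY (raising the security level for
 * authenticated keys), or send a negative reply when the connection or
 * key is unknown. Short term keys are single-use and are freed once
 * returned to the controller.
 */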
3614 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3615 {
3616 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3617 struct hci_cp_le_ltk_reply cp;
3618 struct hci_cp_le_ltk_neg_reply neg;
3619 struct hci_conn *conn;
3620 struct smp_ltk *ltk;
3621
3622 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3623
3624 hci_dev_lock(hdev);
3625
3626 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3627 if (conn == NULL)
3628 goto not_found;
3629
3630 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3631 if (ltk == NULL)
3632 goto not_found;
3633
3634 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3635 cp.handle = cpu_to_le16(conn->handle);
3636
3637 if (ltk->authenticated)
3638 conn->sec_level = BT_SECURITY_HIGH;
3639
3640 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3641
3642 if (ltk->type & HCI_SMP_STK) {
3643 list_del(&ltk->list);
3644 kfree(ltk);
3645 }
3646
3647 hci_dev_unlock(hdev);
3648
3649 return;
3650
3651 not_found:
3652 neg.handle = ev->handle;
3653 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3654 hci_dev_unlock(hdev);
3655 }
3656
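/* LE Meta event: strip the meta header and dispatch on the subevent code
 * to the LE-specific handlers above.
 */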
3657 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3658 {
3659 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3660
3661 skb_pull(skb, sizeof(*le_ev));
3662
3663 switch (le_ev->subevent) {
3664 case HCI_EV_LE_CONN_COMPLETE:
3665 hci_le_conn_complete_evt(hdev, skb);
3666 break;
3667
3668 case HCI_EV_LE_ADVERTISING_REPORT:
3669 hci_le_adv_report_evt(hdev, skb);
3670 break;
3671
3672 case HCI_EV_LE_LTK_REQ:
3673 hci_le_ltk_request_evt(hdev, skb);
3674 break;
3675
3676 default:
3677 break;
3678 }
3679 }
3680
3681 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3682 {
3683 struct hci_ev_channel_selected *ev = (void *) skb->data;
3684 struct hci_conn *hcon;
3685
3686 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3687
3688 skb_pull(skb, sizeof(*ev));
3689
3690 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3691 if (!hcon)
3692 return;
3693
3694 amp_read_loc_assoc_final_data(hdev, hcon);
3695 }
3696
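/* Top-level HCI event dispatcher: strip the event header, hand the
 * remaining payload to the handler for the event code, then free the skb
 * and update the RX event statistics.
 */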
3697 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3698 {
3699 struct hci_event_hdr *hdr = (void *) skb->data;
3700 __u8 event = hdr->evt;
3701
3702 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3703
3704 switch (event) {
3705 case HCI_EV_INQUIRY_COMPLETE:
3706 hci_inquiry_complete_evt(hdev, skb);
3707 break;
3708
3709 case HCI_EV_INQUIRY_RESULT:
3710 hci_inquiry_result_evt(hdev, skb);
3711 break;
3712
3713 case HCI_EV_CONN_COMPLETE:
3714 hci_conn_complete_evt(hdev, skb);
3715 break;
3716
3717 case HCI_EV_CONN_REQUEST:
3718 hci_conn_request_evt(hdev, skb);
3719 break;
3720
3721 case HCI_EV_DISCONN_COMPLETE:
3722 hci_disconn_complete_evt(hdev, skb);
3723 break;
3724
3725 case HCI_EV_AUTH_COMPLETE:
3726 hci_auth_complete_evt(hdev, skb);
3727 break;
3728
3729 case HCI_EV_REMOTE_NAME:
3730 hci_remote_name_evt(hdev, skb);
3731 break;
3732
3733 case HCI_EV_ENCRYPT_CHANGE:
3734 hci_encrypt_change_evt(hdev, skb);
3735 break;
3736
3737 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3738 hci_change_link_key_complete_evt(hdev, skb);
3739 break;
3740
3741 case HCI_EV_REMOTE_FEATURES:
3742 hci_remote_features_evt(hdev, skb);
3743 break;
3744
3745 case HCI_EV_CMD_COMPLETE:
3746 hci_cmd_complete_evt(hdev, skb);
3747 break;
3748
3749 case HCI_EV_CMD_STATUS:
3750 hci_cmd_status_evt(hdev, skb);
3751 break;
3752
3753 case HCI_EV_ROLE_CHANGE:
3754 hci_role_change_evt(hdev, skb);
3755 break;
3756
3757 case HCI_EV_NUM_COMP_PKTS:
3758 hci_num_comp_pkts_evt(hdev, skb);
3759 break;
3760
3761 case HCI_EV_MODE_CHANGE:
3762 hci_mode_change_evt(hdev, skb);
3763 break;
3764
3765 case HCI_EV_PIN_CODE_REQ:
3766 hci_pin_code_request_evt(hdev, skb);
3767 break;
3768
3769 case HCI_EV_LINK_KEY_REQ:
3770 hci_link_key_request_evt(hdev, skb);
3771 break;
3772
3773 case HCI_EV_LINK_KEY_NOTIFY:
3774 hci_link_key_notify_evt(hdev, skb);
3775 break;
3776
3777 case HCI_EV_CLOCK_OFFSET:
3778 hci_clock_offset_evt(hdev, skb);
3779 break;
3780
3781 case HCI_EV_PKT_TYPE_CHANGE:
3782 hci_pkt_type_change_evt(hdev, skb);
3783 break;
3784
3785 case HCI_EV_PSCAN_REP_MODE:
3786 hci_pscan_rep_mode_evt(hdev, skb);
3787 break;
3788
3789 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3790 hci_inquiry_result_with_rssi_evt(hdev, skb);
3791 break;
3792
3793 case HCI_EV_REMOTE_EXT_FEATURES:
3794 hci_remote_ext_features_evt(hdev, skb);
3795 break;
3796
3797 case HCI_EV_SYNC_CONN_COMPLETE:
3798 hci_sync_conn_complete_evt(hdev, skb);
3799 break;
3800
3801 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3802 hci_extended_inquiry_result_evt(hdev, skb);
3803 break;
3804
3805 case HCI_EV_KEY_REFRESH_COMPLETE:
3806 hci_key_refresh_complete_evt(hdev, skb);
3807 break;
3808
3809 case HCI_EV_IO_CAPA_REQUEST:
3810 hci_io_capa_request_evt(hdev, skb);
3811 break;
3812
3813 case HCI_EV_IO_CAPA_REPLY:
3814 hci_io_capa_reply_evt(hdev, skb);
3815 break;
3816
3817 case HCI_EV_USER_CONFIRM_REQUEST:
3818 hci_user_confirm_request_evt(hdev, skb);
3819 break;
3820
3821 case HCI_EV_USER_PASSKEY_REQUEST:
3822 hci_user_passkey_request_evt(hdev, skb);
3823 break;
3824
3825 case HCI_EV_USER_PASSKEY_NOTIFY:
3826 hci_user_passkey_notify_evt(hdev, skb);
3827 break;
3828
3829 case HCI_EV_KEYPRESS_NOTIFY:
3830 hci_keypress_notify_evt(hdev, skb);
3831 break;
3832
3833 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3834 hci_simple_pair_complete_evt(hdev, skb);
3835 break;
3836
3837 case HCI_EV_REMOTE_HOST_FEATURES:
3838 hci_remote_host_features_evt(hdev, skb);
3839 break;
3840
3841 case HCI_EV_LE_META:
3842 hci_le_meta_evt(hdev, skb);
3843 break;
3844
3845 case HCI_EV_CHANNEL_SELECTED:
3846 hci_chan_selected_evt(hdev, skb);
3847 break;
3848
3849 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3850 hci_remote_oob_data_request_evt(hdev, skb);
3851 break;
3852
3853 case HCI_EV_PHY_LINK_COMPLETE:
3854 hci_phy_link_complete_evt(hdev, skb);
3855 break;
3856
3857 case HCI_EV_LOGICAL_LINK_COMPLETE:
3858 hci_loglink_complete_evt(hdev, skb);
3859 break;
3860
3861 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3862 hci_disconn_loglink_complete_evt(hdev, skb);
3863 break;
3864
3865 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3866 hci_disconn_phylink_complete_evt(hdev, skb);
3867 break;
3868
3869 case HCI_EV_NUM_COMP_BLOCKS:
3870 hci_num_comp_blocks_evt(hdev, skb);
3871 break;
3872
3873 default:
3874 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3875 break;
3876 }
3877
3878 kfree_skb(skb);
3879 hdev->stat.evt_rx++;
3880 }