Bluetooth: Add support for custom event terminated commands
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On failure the error is reported to mgmt as a failed stop-discovery
 * request.  On success the HCI_INQUIRY flag is cleared and any task
 * sleeping in wait_on_bit() on that flag is woken up; the memory
 * barrier between clear_bit() and wake_up_bit() is required so the
 * waiter observes the cleared bit (see wake_up_bit() documentation).
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* A connection attempt may have been deferred while the inquiry
	 * was in progress; resume it now. */
	hci_conn_check_pending(hdev);
}
60
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85 }
86
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 *
 * Nothing to update locally: the outcome of the cancelled request is
 * delivered via the Remote Name Request Complete event instead.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * On success, caches the policy value that was sent on the affected
 * connection.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 16-bit connection handle at the
		 * start of the sent command parameters to reach the
		 * policy field. */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161 {
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166 if (rp->status)
167 return;
168
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
171
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
174 {
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
177
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
183
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 /* Reset all non-persistent flags */
197 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
198
199 hdev->discovery.state = DISCOVERY_STOPPED;
200 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
201 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
202
203 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
204 hdev->adv_data_len = 0;
205 }
206
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When mgmt is in use the result is always forwarded to it (even on
 * failure); otherwise a successfully written name is cached in
 * hdev->dev_name.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
227
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
259 }
260
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
263 }
264
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266 {
267 __u8 status = *((__u8 *) skb->data);
268 void *sent;
269
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
271
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 if (!sent)
274 return;
275
276 if (!status) {
277 __u8 param = *((__u8 *) sent);
278
279 if (param)
280 set_bit(HCI_ENCRYPT, &hdev->flags);
281 else
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
283 }
284 }
285
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Synchronizes HCI_ISCAN/HCI_PSCAN with the scan mode the controller
 * accepted and notifies mgmt about the resulting discoverable and
 * connectable transitions.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Clear both flags but remember their previous state so only
	 * actual transitions are reported to mgmt below. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		if (hdev->discov_timeout > 0) {
			/* discov_timeout is kept in seconds */
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
333
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339
340 if (rp->status)
341 return;
342
343 memcpy(hdev->dev_class, rp->dev_class, 3);
344
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 __u8 status = *((__u8 *) skb->data);
352 void *sent;
353
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 if (!sent)
358 return;
359
360 hci_dev_lock(hdev);
361
362 if (status == 0)
363 memcpy(hdev->dev_class, sent, 3);
364
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
367
368 hci_dev_unlock(hdev);
369 }
370
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 __u16 setting;
375
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377
378 if (rp->status)
379 return;
380
381 setting = __le16_to_cpu(rp->voice_setting);
382
383 if (hdev->voice_setting == setting)
384 return;
385
386 hdev->voice_setting = setting;
387
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389
390 if (hdev->notify)
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 struct sk_buff *skb)
396 {
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
400
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
402
403 if (status)
404 return;
405
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
409
410 setting = get_unaligned_le16(sent);
411
412 if (hdev->voice_setting == setting)
413 return;
414
415 hdev->voice_setting = setting;
416
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * Mirrors the Secure Simple Pairing host feature bit after the
 * controller acknowledged the mode change, completes any pending mgmt
 * SSP request, and otherwise tracks the state in HCI_SSP_ENABLED.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		/* Keep the advertised host feature bit in sync */
		if (sent->mode)
			hdev->host_features[0] |= LMP_HOST_SSP;
		else
			hdev->host_features[0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
450
451 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
452 {
453 struct hci_rp_read_local_version *rp = (void *) skb->data;
454
455 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
456
457 if (rp->status)
458 return;
459
460 hdev->hci_ver = rp->hci_ver;
461 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
462 hdev->lmp_ver = rp->lmp_ver;
463 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
464 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
465
466 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
467 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
468 }
469
470 static void hci_cc_read_local_commands(struct hci_dev *hdev,
471 struct sk_buff *skb)
472 {
473 struct hci_rp_read_local_commands *rp = (void *) skb->data;
474
475 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
476
477 if (!rp->status)
478 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
479 }
480
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches the LMP feature mask and derives the default ACL packet
 * types and (e)SCO link types the controller supports from it.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}
536
537 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
538 struct sk_buff *skb)
539 {
540 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
541
542 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
543
544 if (rp->status)
545 return;
546
547 switch (rp->page) {
548 case 0:
549 memcpy(hdev->features, rp->features, 8);
550 break;
551 case 1:
552 memcpy(hdev->host_features, rp->features, 8);
553 break;
554 }
555 }
556
557 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
558 struct sk_buff *skb)
559 {
560 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
561
562 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
563
564 if (!rp->status)
565 hdev->flow_ctl_mode = rp->mode;
566 }
567
568 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
569 {
570 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
571
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573
574 if (rp->status)
575 return;
576
577 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
578 hdev->sco_mtu = rp->sco_mtu;
579 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
580 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
581
582 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
583 hdev->sco_mtu = 64;
584 hdev->sco_pkts = 8;
585 }
586
587 hdev->acl_cnt = hdev->acl_pkts;
588 hdev->sco_cnt = hdev->sco_pkts;
589
590 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
591 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
592 }
593
594 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
595 {
596 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
597
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599
600 if (!rp->status)
601 bacpy(&hdev->bdaddr, &rp->bdaddr);
602 }
603
604 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
605 struct sk_buff *skb)
606 {
607 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
608
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610
611 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
612 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
613 hdev->page_scan_window = __le16_to_cpu(rp->window);
614 }
615 }
616
617 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
618 struct sk_buff *skb)
619 {
620 u8 status = *((u8 *) skb->data);
621 struct hci_cp_write_page_scan_activity *sent;
622
623 BT_DBG("%s status 0x%2.2x", hdev->name, status);
624
625 if (status)
626 return;
627
628 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
629 if (!sent)
630 return;
631
632 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
633 hdev->page_scan_window = __le16_to_cpu(sent->window);
634 }
635
636 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
637 struct sk_buff *skb)
638 {
639 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
640
641 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
642
643 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
644 hdev->page_scan_type = rp->type;
645 }
646
647 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
648 struct sk_buff *skb)
649 {
650 u8 status = *((u8 *) skb->data);
651 u8 *type;
652
653 BT_DBG("%s status 0x%2.2x", hdev->name, status);
654
655 if (status)
656 return;
657
658 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
659 if (type)
660 hdev->page_scan_type = *type;
661 }
662
663 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
664 struct sk_buff *skb)
665 {
666 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
667
668 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
669
670 if (rp->status)
671 return;
672
673 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
674 hdev->block_len = __le16_to_cpu(rp->block_len);
675 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
676
677 hdev->block_cnt = hdev->num_blocks;
678
679 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
680 hdev->block_cnt, hdev->block_len);
681 }
682
683 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
684 struct sk_buff *skb)
685 {
686 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
687
688 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
689
690 if (rp->status)
691 goto a2mp_rsp;
692
693 hdev->amp_status = rp->amp_status;
694 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
695 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
696 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
697 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
698 hdev->amp_type = rp->amp_type;
699 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
700 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
701 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
702 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
703
704 a2mp_rsp:
705 a2mp_send_getinfo_rsp(hdev);
706 }
707
708 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
709 struct sk_buff *skb)
710 {
711 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
712 struct amp_assoc *assoc = &hdev->loc_assoc;
713 size_t rem_len, frag_len;
714
715 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716
717 if (rp->status)
718 goto a2mp_rsp;
719
720 frag_len = skb->len - sizeof(*rp);
721 rem_len = __le16_to_cpu(rp->rem_len);
722
723 if (rem_len > frag_len) {
724 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
725
726 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
727 assoc->offset += frag_len;
728
729 /* Read other fragments */
730 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
731
732 return;
733 }
734
735 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
736 assoc->len = assoc->offset + rem_len;
737 assoc->offset = 0;
738
739 a2mp_rsp:
740 /* Send A2MP Rsp when all fragments are received */
741 a2mp_send_getampassoc_rsp(hdev, rp->status);
742 a2mp_send_create_phy_link_req(hdev, rp->status);
743 }
744
745 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
746 struct sk_buff *skb)
747 {
748 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
749
750 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
751
752 if (!rp->status)
753 hdev->inq_tx_power = rp->tx_power;
754 }
755
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Always forwards the result to mgmt when it is in use; on success the
 * PIN length from the sent command is recorded on the matching
 * connection for later use.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
783
784 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
785 {
786 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
787
788 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
789
790 hci_dev_lock(hdev);
791
792 if (test_bit(HCI_MGMT, &hdev->dev_flags))
793 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
794 rp->status);
795
796 hci_dev_unlock(hdev);
797 }
798
799 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
800 struct sk_buff *skb)
801 {
802 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
803
804 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
805
806 if (rp->status)
807 return;
808
809 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
810 hdev->le_pkts = rp->le_max_pkt;
811
812 hdev->le_cnt = hdev->le_pkts;
813
814 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
815 }
816
817 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
818 struct sk_buff *skb)
819 {
820 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
821
822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824 if (!rp->status)
825 memcpy(hdev->le_features, rp->features, 8);
826 }
827
828 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
829 struct sk_buff *skb)
830 {
831 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
832
833 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834
835 if (!rp->status)
836 hdev->adv_tx_power = rp->tx_power;
837 }
838
839 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
840 {
841 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
842
843 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
844
845 hci_dev_lock(hdev);
846
847 if (test_bit(HCI_MGMT, &hdev->dev_flags))
848 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
849 rp->status);
850
851 hci_dev_unlock(hdev);
852 }
853
854 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
855 struct sk_buff *skb)
856 {
857 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
858
859 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
860
861 hci_dev_lock(hdev);
862
863 if (test_bit(HCI_MGMT, &hdev->dev_flags))
864 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
865 ACL_LINK, 0, rp->status);
866
867 hci_dev_unlock(hdev);
868 }
869
870 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
871 {
872 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
873
874 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
875
876 hci_dev_lock(hdev);
877
878 if (test_bit(HCI_MGMT, &hdev->dev_flags))
879 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
880 0, rp->status);
881
882 hci_dev_unlock(hdev);
883 }
884
885 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
886 struct sk_buff *skb)
887 {
888 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
889
890 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
891
892 hci_dev_lock(hdev);
893
894 if (test_bit(HCI_MGMT, &hdev->dev_flags))
895 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
896 ACL_LINK, 0, rp->status);
897
898 hci_dev_unlock(hdev);
899 }
900
901 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
902 struct sk_buff *skb)
903 {
904 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
905
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907
908 hci_dev_lock(hdev);
909 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
910 rp->randomizer, rp->status);
911 hci_dev_unlock(hdev);
912 }
913
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * Tracks whether LE advertising (peripheral role) is active and,
 * outside of controller init, kicks off a request to refresh the
 * advertising data to match the new state.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* *sent is the enable parameter of the sent command */
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	if (!test_bit(HCI_INIT, &hdev->flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_update_ad(&req);
		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);
}
943
944 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
945 {
946 __u8 status = *((__u8 *) skb->data);
947
948 BT_DBG("%s status 0x%2.2x", hdev->name, status);
949
950 if (status) {
951 hci_dev_lock(hdev);
952 mgmt_start_discovery_failed(hdev, status);
953 hci_dev_unlock(hdev);
954 return;
955 }
956 }
957
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Dispatches on whether the sent command enabled or disabled LE
 * scanning: updates the HCI_LE_SCAN flag and the discovery state
 * machine, and reports failures to mgmt.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* An interleaved discovery continues with a BR/EDR
		 * inquiry once the LE scan phase is done. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1012
1013 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1014 struct sk_buff *skb)
1015 {
1016 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1017
1018 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1019
1020 if (!rp->status)
1021 hdev->le_white_list_size = rp->size;
1022 }
1023
1024 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1025 struct sk_buff *skb)
1026 {
1027 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030
1031 if (!rp->status)
1032 memcpy(hdev->le_states, rp->le_states, 8);
1033 }
1034
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * Mirrors the LE and simultaneous LE/BR-EDR host feature bits after a
 * successful write, and completes a pending mgmt LE-enable request
 * unless the write happened during controller init.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->host_features[0] |= LMP_HOST_LE_BREDR;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);
}
1063
1064 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1065 struct sk_buff *skb)
1066 {
1067 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1068
1069 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1070 hdev->name, rp->status, rp->phy_handle);
1071
1072 if (rp->status)
1073 return;
1074
1075 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1076 }
1077
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure, resumes any connection attempts that were deferred for
 * the inquiry and reports a failed discovery start to mgmt.  On
 * success, marks the inquiry active and moves discovery to FINDING.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
1097
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, a connection in BT_CONNECT is torn down — except that
 * status 0x0c (Command Disallowed) is retried up to two attempts by
 * parking the connection in BT_CONNECT2.  On success, makes sure a
 * hci_conn object exists for the outgoing link so the later
 * Connection Complete event can find it.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				/* Outgoing connection: we initiate, so
				 * we start out as master. */
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1137
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling here: look up the ACL connection the
 * SCO setup was attached to and tear down its pending SCO link.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* cp->handle is the handle of the ACL link the SCO channel
	 * was being added to. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1172
1173 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1174 {
1175 struct hci_cp_auth_requested *cp;
1176 struct hci_conn *conn;
1177
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1179
1180 if (!status)
1181 return;
1182
1183 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1184 if (!cp)
1185 return;
1186
1187 hci_dev_lock(hdev);
1188
1189 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1190 if (conn) {
1191 if (conn->state == BT_CONFIG) {
1192 hci_proto_connect_cfm(conn, status);
1193 hci_conn_put(conn);
1194 }
1195 }
1196
1197 hci_dev_unlock(hdev);
1198 }
1199
1200 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1201 {
1202 struct hci_cp_set_conn_encrypt *cp;
1203 struct hci_conn *conn;
1204
1205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1206
1207 if (!status)
1208 return;
1209
1210 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1211 if (!cp)
1212 return;
1213
1214 hci_dev_lock(hdev);
1215
1216 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1217 if (conn) {
1218 if (conn->state == BT_CONFIG) {
1219 hci_proto_connect_cfm(conn, status);
1220 hci_conn_put(conn);
1221 }
1222 }
1223
1224 hci_dev_unlock(hdev);
1225 }
1226
1227 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1228 struct hci_conn *conn)
1229 {
1230 if (conn->state != BT_CONFIG || !conn->out)
1231 return 0;
1232
1233 if (conn->pending_sec_level == BT_SECURITY_SDP)
1234 return 0;
1235
1236 /* Only request authentication for SSP connections or non-SSP
1237 * devices with sec_level HIGH or if MITM protection is requested */
1238 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1239 conn->pending_sec_level != BT_SECURITY_HIGH)
1240 return 0;
1241
1242 return 1;
1243 }
1244
1245 static int hci_resolve_name(struct hci_dev *hdev,
1246 struct inquiry_entry *e)
1247 {
1248 struct hci_cp_remote_name_req cp;
1249
1250 memset(&cp, 0, sizeof(cp));
1251
1252 bacpy(&cp.bdaddr, &e->data.bdaddr);
1253 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1254 cp.pscan_mode = e->data.pscan_mode;
1255 cp.clock_offset = e->data.clock_offset;
1256
1257 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1258 }
1259
1260 static bool hci_resolve_next_name(struct hci_dev *hdev)
1261 {
1262 struct discovery_state *discov = &hdev->discovery;
1263 struct inquiry_entry *e;
1264
1265 if (list_empty(&discov->resolve))
1266 return false;
1267
1268 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1269 if (!e)
1270 return false;
1271
1272 if (hci_resolve_name(hdev, e) == 0) {
1273 e->name_state = NAME_PENDING;
1274 return true;
1275 }
1276
1277 return false;
1278 }
1279
/* Handle the outcome of a pending remote-name lookup for @bdaddr during
 * discovery. Reports the connection (if any) and the resolved name to mgmt,
 * updates the inquiry cache entry's name state, and either continues with
 * the next pending name or marks discovery as stopped.
 *
 * @conn may be NULL; @name is NULL when resolution failed.
 * Called with hdev locked.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First connected-event for this link: tell mgmt, including the name
	 * if we have one. */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested; finish discovery without resolving more. */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: keep resolving, discovery stays active. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1322
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ. Only the failure path
 * is handled here: the pending-name bookkeeping is unwound (mgmt case) and,
 * if the connection still needs authentication, an Auth Requested command is
 * issued now since no Remote Name Request Complete event will arrive.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Let the discovery state machine know this name lookup failed. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		/* NOTE: inner cp intentionally shadows the outer request
		 * pointer; it is a different command structure. */
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1361
1362 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1363 {
1364 struct hci_cp_read_remote_features *cp;
1365 struct hci_conn *conn;
1366
1367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1368
1369 if (!status)
1370 return;
1371
1372 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1373 if (!cp)
1374 return;
1375
1376 hci_dev_lock(hdev);
1377
1378 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1379 if (conn) {
1380 if (conn->state == BT_CONFIG) {
1381 hci_proto_connect_cfm(conn, status);
1382 hci_conn_put(conn);
1383 }
1384 }
1385
1386 hci_dev_unlock(hdev);
1387 }
1388
1389 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1390 {
1391 struct hci_cp_read_remote_ext_features *cp;
1392 struct hci_conn *conn;
1393
1394 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1395
1396 if (!status)
1397 return;
1398
1399 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1400 if (!cp)
1401 return;
1402
1403 hci_dev_lock(hdev);
1404
1405 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1406 if (conn) {
1407 if (conn->state == BT_CONFIG) {
1408 hci_proto_connect_cfm(conn, status);
1409 hci_conn_put(conn);
1410 }
1411 }
1412
1413 hci_dev_unlock(hdev);
1414 }
1415
1416 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1417 {
1418 struct hci_cp_setup_sync_conn *cp;
1419 struct hci_conn *acl, *sco;
1420 __u16 handle;
1421
1422 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1423
1424 if (!status)
1425 return;
1426
1427 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1428 if (!cp)
1429 return;
1430
1431 handle = __le16_to_cpu(cp->handle);
1432
1433 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1434
1435 hci_dev_lock(hdev);
1436
1437 acl = hci_conn_hash_lookup_handle(hdev, handle);
1438 if (acl) {
1439 sco = acl->link;
1440 if (sco) {
1441 sco->state = BT_CLOSED;
1442
1443 hci_proto_connect_cfm(sco, status);
1444 hci_conn_del(sco);
1445 }
1446 }
1447
1448 hci_dev_unlock(hdev);
1449 }
1450
1451 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1452 {
1453 struct hci_cp_sniff_mode *cp;
1454 struct hci_conn *conn;
1455
1456 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1457
1458 if (!status)
1459 return;
1460
1461 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1462 if (!cp)
1463 return;
1464
1465 hci_dev_lock(hdev);
1466
1467 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1468 if (conn) {
1469 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1470
1471 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1472 hci_sco_setup(conn, status);
1473 }
1474
1475 hci_dev_unlock(hdev);
1476 }
1477
1478 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1479 {
1480 struct hci_cp_exit_sniff_mode *cp;
1481 struct hci_conn *conn;
1482
1483 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1484
1485 if (!status)
1486 return;
1487
1488 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1489 if (!cp)
1490 return;
1491
1492 hci_dev_lock(hdev);
1493
1494 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1495 if (conn) {
1496 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1497
1498 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1499 hci_sco_setup(conn, status);
1500 }
1501
1502 hci_dev_unlock(hdev);
1503 }
1504
1505 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1506 {
1507 struct hci_cp_disconnect *cp;
1508 struct hci_conn *conn;
1509
1510 if (!status)
1511 return;
1512
1513 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1514 if (!cp)
1515 return;
1516
1517 hci_dev_lock(hdev);
1518
1519 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1520 if (conn)
1521 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1522 conn->dst_type, status);
1523
1524 hci_dev_unlock(hdev);
1525 }
1526
/* Command Status handler for HCI_OP_LE_CREATE_CONN. Only the failure path
 * needs work: the pending LE connection (looked up by state, since the
 * command carries no handle yet) is reported to mgmt, confirmed to the
 * protocol layer and deleted.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);

		/* There is at most one outgoing LE connection attempt at a
		 * time, so a state lookup finds the right one. */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (!conn) {
			hci_dev_unlock(hdev);
			return;
		}

		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);

		conn->state = BT_CLOSED;
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		hci_proto_connect_cfm(conn, status);
		hci_conn_del(conn);

		hci_dev_unlock(hdev);
	}
}
1553
/* Command Status handler for HCI_OP_CREATE_PHY_LINK (AMP). On failure the
 * placeholder connection for the physical link is dropped; on success the
 * AMP assoc exchange with the remote continues.
 */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		/* phy_handle is a single byte, used directly as the lookup
		 * handle for the placeholder AMP connection. */
		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
1578
1579 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1580 {
1581 struct hci_cp_accept_phy_link *cp;
1582
1583 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1584
1585 if (status)
1586 return;
1587
1588 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1589 if (!cp)
1590 return;
1591
1592 amp_write_remote_assoc(hdev, cp->phy_handle);
1593 }
1594
/* Handle the Inquiry Complete event: clear HCI_INQUIRY, wake any waiters,
 * and drive the mgmt discovery state machine onward — either into name
 * resolution for discovered devices or to DISCOVERY_STOPPED.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Pair the clear above with the barrier wake_up_bit expects. */
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state is only tracked for mgmt-controlled adapters. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	/* No names to resolve: discovery is done. */
	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names; fall back to stopped if the first request
	 * cannot be sent. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1635
1636 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1637 {
1638 struct inquiry_data data;
1639 struct inquiry_info *info = (void *) (skb->data + 1);
1640 int num_rsp = *((__u8 *) skb->data);
1641
1642 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1643
1644 if (!num_rsp)
1645 return;
1646
1647 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1648 return;
1649
1650 hci_dev_lock(hdev);
1651
1652 for (; num_rsp; num_rsp--, info++) {
1653 bool name_known, ssp;
1654
1655 bacpy(&data.bdaddr, &info->bdaddr);
1656 data.pscan_rep_mode = info->pscan_rep_mode;
1657 data.pscan_period_mode = info->pscan_period_mode;
1658 data.pscan_mode = info->pscan_mode;
1659 memcpy(data.dev_class, info->dev_class, 3);
1660 data.clock_offset = info->clock_offset;
1661 data.rssi = 0x00;
1662 data.ssp_mode = 0x00;
1663
1664 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1665 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1666 info->dev_class, 0, !name_known, ssp, NULL,
1667 0);
1668 }
1669
1670 hci_dev_unlock(hdev);
1671 }
1672
/* Handle the Connection Complete event for ACL/SCO links: bind the handle
 * to the matching hci_conn, start follow-up configuration commands on
 * success (remote features, packet type), or tear the connection down on
 * failure. Also retries a SCO lookup under ESCO_LINK, since a SCO complete
 * can answer an eSCO request on pre-eSCO controllers.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* An eSCO request may be completed by the controller as a
		 * plain SCO link; retarget the pending eSCO conn. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy connection without a stored link
			 * key: give pairing more time before disconnecting. */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	/* Run any SCO setup that was waiting on this ACL. */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links confirm later, after remote features are read. */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1755
/* Accept a deferred incoming connection request for @conn, choosing the
 * plain or synchronous accept command based on the controller's eSCO
 * capability. @mask carries the link-mode bits (e.g. HCI_LM_MASTER) that
 * decide the requested role.
 */
void hci_conn_accept(struct hci_conn *conn, int mask)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p", conn);

	conn->state = BT_CONFIG;

	if (!lmp_esco_capable(hdev)) {
		struct hci_cp_accept_conn_req cp;

		bacpy(&cp.bdaddr, &conn->dst);

		/* Request a role switch only if the controller supports it. */
		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else /* lmp_esco_capable(hdev)) */ {
		struct hci_cp_accept_sync_conn_req cp;

		bacpy(&cp.bdaddr, &conn->dst);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* 8 kHz * 16-bit = 0x1f40 bytes/s in both directions; no
		 * latency limit, voice setting from the adapter, and
		 * retransmission effort left to the controller (0xff). */
		cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
		cp.max_latency = __constant_cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
			     sizeof(cp), &cp);
	}
}
1791
/* Handle the Connection Request event: consult the protocol layers and the
 * blacklist, then either accept the request (immediately, or deferred to
 * the protocol layer when HCI_PROTO_DEFER is set) or reject it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Protocol layers can add HCI_LM_ACCEPT and set HCI_PROTO_DEFER. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Keep the cached device class current. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		/* ACL is always accepted directly; SCO/eSCO only when not
		 * deferred and the controller lacks eSCO support. */
		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* Same default sync parameters as hci_conn_accept(). */
			cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Deferred: the protocol layer will accept later via
			 * hci_conn_accept(). */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
			hci_conn_put(conn);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1874
1875 static u8 hci_to_mgmt_reason(u8 err)
1876 {
1877 switch (err) {
1878 case HCI_ERROR_CONNECTION_TIMEOUT:
1879 return MGMT_DEV_DISCONN_TIMEOUT;
1880 case HCI_ERROR_REMOTE_USER_TERM:
1881 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1882 case HCI_ERROR_REMOTE_POWER_OFF:
1883 return MGMT_DEV_DISCONN_REMOTE;
1884 case HCI_ERROR_LOCAL_HOST_TERM:
1885 return MGMT_DEV_DISCONN_LOCAL_HOST;
1886 default:
1887 return MGMT_DEV_DISCONN_UNKNOWN;
1888 }
1889 }
1890
/* Handle the Disconnection Complete event: notify mgmt (success or failed
 * disconnect), drop the stored link key when flagged, and confirm + delete
 * the connection on success.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only links previously reported to mgmt as connected generate a
	 * disconnect/failure notification. */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		/* flush_key marks a temporary key that must not outlive the
		 * connection. */
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1930
/* Handle the Authentication Complete event: update link mode and security
 * level on success, report failures to mgmt, then continue connection setup
 * (encryption for SSP links) or deliver auth/encrypt confirmations for
 * already-established links.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot re-authenticate an
		 * existing link; don't raise the security level then. */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP setup continues with encryption; otherwise setup is
		 * finished here (successfully or not). */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Balance the reference while resetting the disc timeout. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	/* An encryption request was queued behind this authentication. */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1996
/* Handle the Remote Name Request Complete event: feed the (possibly failed)
 * name into the discovery state machine, then — name lookup done — issue
 * any authentication the outgoing connection still needs.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name bookkeeping only matters for mgmt-controlled adapters. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2035
/* Handle the Encryption Change event: update the link mode bits, disconnect
 * established links on encryption failure, and finish connection setup or
 * deliver encrypt confirmations otherwise.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* Encryption failed on a live link: tear it down rather
		 * than leave it running unencrypted. */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_put(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2078
2079 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2080 struct sk_buff *skb)
2081 {
2082 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2083 struct hci_conn *conn;
2084
2085 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2086
2087 hci_dev_lock(hdev);
2088
2089 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2090 if (conn) {
2091 if (!ev->status)
2092 conn->link_mode |= HCI_LM_SECURE;
2093
2094 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2095
2096 hci_key_change_cfm(conn, ev->status);
2097 }
2098
2099 hci_dev_unlock(hdev);
2100 }
2101
/* Handle the Read Remote Features Complete event during connection setup:
 * cache the feature page, request the extended page for SSP-capable peers,
 * trigger a remote name request (or report the connection to mgmt), and
 * complete setup when no authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	/* Only connections still in setup take the path below. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP-capable: fetch extended features page 1 first and
	 * resume in the ext-features handler. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting to mgmt; otherwise
	 * report the connection now (first time only). */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2151
/* Handle the Command Complete event: dispatch the return parameters to the
 * per-opcode hci_cc_* handler, stop the command timer, complete any pending
 * HCI request for this opcode, and restart the command queue when the
 * controller allows more commands (ev->ncmd).
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of every Command Complete return parameter block is
	 * the status; grab it before the header is pulled. */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real command completing means the controller is alive. */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Controller can take more commands (unless a reset is pending). */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2387
/* Handle the Command Status event: dispatch to the per-opcode hci_cs_*
 * handler, stop the command timer, complete the pending HCI request unless
 * the command is waiting for a custom terminating event, and restart the
 * command queue when the controller allows more commands.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real command status means the controller is alive. */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the request now on failure, or on success when the
	 * command is not waiting for a custom terminating event
	 * (bt_cb()->req.event); otherwise the request completes when that
	 * event arrives. */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Controller can take more commands (unless a reset is pending). */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2476
2477 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2478 {
2479 struct hci_ev_role_change *ev = (void *) skb->data;
2480 struct hci_conn *conn;
2481
2482 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2483
2484 hci_dev_lock(hdev);
2485
2486 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2487 if (conn) {
2488 if (!ev->status) {
2489 if (ev->role)
2490 conn->link_mode &= ~HCI_LM_MASTER;
2491 else
2492 conn->link_mode |= HCI_LM_MASTER;
2493 }
2494
2495 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2496
2497 hci_role_switch_cfm(conn, ev->status, ev->role);
2498 }
2499
2500 hci_dev_unlock(hdev);
2501 }
2502
/* Handle the Number Of Completed Packets event (packet-based flow
 * control): credit back transmit quota for each reported connection
 * handle and restart the TX work queue.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid under packet-based flow control;
	 * block-based controllers use Number Of Completed Blocks. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the fixed header and num_hndl entries all fit
	 * inside the received event before parsing. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the completed packets to the matching quota
		 * pool, clamped to the controller's buffer count. */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without dedicated LE buffers share
			 * the ACL buffer pool (le_pkts == 0). */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2568
2569 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2570 __u16 handle)
2571 {
2572 struct hci_chan *chan;
2573
2574 switch (hdev->dev_type) {
2575 case HCI_BREDR:
2576 return hci_conn_hash_lookup_handle(hdev, handle);
2577 case HCI_AMP:
2578 chan = hci_chan_lookup_handle(hdev, handle);
2579 if (chan)
2580 return chan->conn;
2581 break;
2582 default:
2583 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2584 break;
2585 }
2586
2587 return NULL;
2588 }
2589
/* Handle the Number Of Completed Data Blocks event (block-based flow
 * control): credit completed blocks back to the shared block pool for
 * each reported handle and restart the TX work queue.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* Only meaningful when the controller uses block-based flow
	 * control (typically AMP controllers). */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the fixed header and num_hndl entries all fit
	 * inside the received event before parsing. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle resolution depends on dev_type (BR/EDR vs AMP) */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* All block-based links share one pool, clamped
			 * to the controller's advertised block count. */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2639
/* Handle the HCI Mode Change event (active/sniff/hold transitions):
 * record the new mode and interval, maintain the power-save flag and
 * run any SCO setup that was deferred on a pending mode change.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		/* Mode change we did not initiate ourselves (no pending
		 * MODE_CHANGE flag): update the POWER_SAVE flag to match
		 * the new mode. */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* A SCO setup was waiting on this mode change; run it now */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
2668
/* Handle the HCI PIN Code Request event: auto-reject when the adapter
 * is not pairable, otherwise forward the request to user space via the
 * management interface.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* The hold/put pair re-arms the disconnect timer with the longer
	 * pairing timeout so the link survives the PIN exchange. */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit PIN; tell user space */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2705
/* Handle the HCI Link Key Request event: look up a stored link key for
 * the remote device and reply with it, subject to security policy
 * checks; send a negative reply when no acceptable key exists.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Kernel-side link key storage is opt-in via mgmt; without it
	 * the controller/user space handles key replies. */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable when explicitly enabled */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Reject unauthenticated keys when the pending auth
		 * requirement includes MITM protection. */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys from short PINs are too weak for
		 * high security. */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2768
/* Handle the HCI Link Key Notification event: record the new key type
 * on the connection and persist the key when kernel-side link key
 * storage is enabled.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* hold/put re-arms the disconnect timer with the normal
		 * timeout now that key exchange is done. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps its previous type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	/* conn may be NULL here; hci_add_link_key handles that case */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2797
2798 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2799 {
2800 struct hci_ev_clock_offset *ev = (void *) skb->data;
2801 struct hci_conn *conn;
2802
2803 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2804
2805 hci_dev_lock(hdev);
2806
2807 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2808 if (conn && !ev->status) {
2809 struct inquiry_entry *ie;
2810
2811 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2812 if (ie) {
2813 ie->data.clock_offset = ev->clock_offset;
2814 ie->timestamp = jiffies;
2815 }
2816 }
2817
2818 hci_dev_unlock(hdev);
2819 }
2820
2821 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2822 {
2823 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2824 struct hci_conn *conn;
2825
2826 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2827
2828 hci_dev_lock(hdev);
2829
2830 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2831 if (conn && !ev->status)
2832 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2833
2834 hci_dev_unlock(hdev);
2835 }
2836
2837 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2838 {
2839 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2840 struct inquiry_entry *ie;
2841
2842 BT_DBG("%s", hdev->name);
2843
2844 hci_dev_lock(hdev);
2845
2846 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2847 if (ie) {
2848 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2849 ie->timestamp = jiffies;
2850 }
2851
2852 hci_dev_unlock(hdev);
2853 }
2854
/* Handle the HCI Inquiry Result with RSSI event: parse each response,
 * update the inquiry cache and report found devices to mgmt. Two wire
 * formats exist — with and without the pscan_mode field — so the
 * per-response size is used to pick the right parser.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported via mgmt */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Per-response size mismatch means the controller used the
	 * legacy format that still carries pscan_mode. */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2914
/* Handle the HCI Read Remote Extended Features Complete event: record
 * the remote host's SSP support (page 1), then continue connection
 * setup with a remote name request or mgmt connected notification.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Page 1 carries the host feature bits, including SSP support */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Not yet reported to mgmt: fetch the remote name first;
	 * otherwise notify mgmt that the device is connected. */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No outgoing authentication pending: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2963
/* Handle the HCI Synchronous Connection Complete event: finalize SCO/
 * eSCO setup, retrying once with a downgraded packet type on specific
 * rejection errors before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO attempt may complete as plain SCO; retry the
		 * lookup under ESCO_LINK and downgrade the type. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Recoverable rejections: retry once with a restricted
		 * packet type mask before declaring failure. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3019
/* Handle the HCI Extended Inquiry Result event: parse each response
 * including its EIR data, update the inquiry cache and report found
 * devices to mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported via mgmt */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* The name is "known" if the EIR data already carries a
		 * complete name — no separate name request is needed. */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
3067
/* Handle the HCI Encryption Key Refresh Complete event: promote the
 * pending security level on success, or tear down the link on an
 * authentication failure.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Refresh failed on an established link: disconnect it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_put(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold/put re-arms the disconnect timer with the normal
		 * timeout. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3111
3112 static u8 hci_get_auth_req(struct hci_conn *conn)
3113 {
3114 /* If remote requests dedicated bonding follow that lead */
3115 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3116 /* If both remote and local IO capabilities allow MITM
3117 * protection then require it, otherwise don't */
3118 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3119 return 0x02;
3120 else
3121 return 0x03;
3122 }
3123
3124 /* If remote requests no-bonding follow that lead */
3125 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3126 return conn->remote_auth | (conn->auth_type & 0x01);
3127
3128 return conn->auth_type;
3129 }
3130
/* Handle the HCI IO Capability Request event: reply with our IO
 * capability and auth requirements when pairing is allowed, otherwise
 * send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing;
	 * the matching put happens in the pairing-complete path. */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Pairing is allowed when we are pairable or the remote only
	 * wants general bonding (no dedicated bonding). */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we actually have some
		 * stored for this peer and a side can use it. */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3182
3183 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3184 {
3185 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3186 struct hci_conn *conn;
3187
3188 BT_DBG("%s", hdev->name);
3189
3190 hci_dev_lock(hdev);
3191
3192 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3193 if (!conn)
3194 goto unlock;
3195
3196 conn->remote_cap = ev->capability;
3197 conn->remote_auth = ev->authentication;
3198 if (ev->oob_data)
3199 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3200
3201 unlock:
3202 hci_dev_unlock(hdev);
3203 }
3204
/* Handle the HCI User Confirmation Request event: auto-accept or
 * auto-reject based on the MITM requirements of both sides, or forward
 * the numeric-comparison request to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM protection flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Configurable delay before auto-accepting, implemented
		 * via the connection's auto_accept_timer. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3272
3273 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3274 struct sk_buff *skb)
3275 {
3276 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3277
3278 BT_DBG("%s", hdev->name);
3279
3280 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3281 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3282 }
3283
3284 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3285 struct sk_buff *skb)
3286 {
3287 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3288 struct hci_conn *conn;
3289
3290 BT_DBG("%s", hdev->name);
3291
3292 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3293 if (!conn)
3294 return;
3295
3296 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3297 conn->passkey_entered = 0;
3298
3299 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3300 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3301 conn->dst_type, conn->passkey_notify,
3302 conn->passkey_entered);
3303 }
3304
/* Handle the HCI Keypress Notification event: track how many passkey
 * digits the remote has entered and relay progress to mgmt. STARTED
 * and COMPLETED return early without generating a mgmt notification.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
3342
/* Handle the HCI Simple Pairing Complete event: report pairing
 * failures to mgmt (avoiding duplicates with auth_complete) and drop
 * the connection reference taken at IO capability request time.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	/* Balances the hci_conn_hold() done in hci_io_capa_request_evt */
	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3371
3372 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3373 struct sk_buff *skb)
3374 {
3375 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3376 struct inquiry_entry *ie;
3377
3378 BT_DBG("%s", hdev->name);
3379
3380 hci_dev_lock(hdev);
3381
3382 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3383 if (ie)
3384 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3385
3386 hci_dev_unlock(hdev);
3387 }
3388
/* Handle the HCI Remote OOB Data Request event: reply with any stored
 * out-of-band hash/randomizer pair for the peer, or send a negative
 * reply when none exists.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only managed through the mgmt interface */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
			     &cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
			     &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3423
/* Handle the AMP Physical Link Complete event: finish bringing up the
 * AMP physical link and confirm it to the AMP manager, or delete the
 * connection on failure.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The AMP link inherits its peer address from the BR/EDR
	 * connection that set it up. */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* hold/put re-arms the disconnect timer with the normal timeout */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_put(hcon);

	hci_conn_hold_device(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
3463
/* Handle the AMP Logical Link Complete event: create the hci_chan for
 * the new logical link and, when an L2CAP channel is waiting on the
 * AMP manager, confirm the logical link to it.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the channel's MTU to the AMP controller's
		 * block MTU and confirm the logical link. */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3501
3502 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3503 struct sk_buff *skb)
3504 {
3505 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3506 struct hci_chan *hchan;
3507
3508 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3509 le16_to_cpu(ev->handle), ev->status);
3510
3511 if (ev->status)
3512 return;
3513
3514 hci_dev_lock(hdev);
3515
3516 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3517 if (!hchan)
3518 goto unlock;
3519
3520 amp_destroy_logical_link(hchan, ev->reason);
3521
3522 unlock:
3523 hci_dev_unlock(hdev);
3524 }
3525
3526 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3527 struct sk_buff *skb)
3528 {
3529 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3530 struct hci_conn *hcon;
3531
3532 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3533
3534 if (ev->status)
3535 return;
3536
3537 hci_dev_lock(hdev);
3538
3539 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3540 if (hcon) {
3541 hcon->state = BT_CLOSED;
3542 hci_conn_del(hcon);
3543 }
3544
3545 hci_dev_unlock(hdev);
3546 }
3547
/* Handle the LE Connection Complete event: bind the event to the
 * pending LE connection (creating one for incoming links), then either
 * report the failure or finalize the established connection.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing attempt sits in BT_CONNECT; if none is found this
	 * is an incoming connection and a new hci_conn is created. */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3598
3599 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3600 {
3601 u8 num_reports = skb->data[0];
3602 void *ptr = &skb->data[1];
3603 s8 rssi;
3604
3605 while (num_reports--) {
3606 struct hci_ev_le_advertising_info *ev = ptr;
3607
3608 rssi = ev->data[ev->length];
3609 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3610 NULL, rssi, 0, 1, ev->data, ev->length);
3611
3612 ptr += sizeof(*ev) + ev->length + 1;
3613 }
3614 }
3615
3616 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3617 {
3618 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3619 struct hci_cp_le_ltk_reply cp;
3620 struct hci_cp_le_ltk_neg_reply neg;
3621 struct hci_conn *conn;
3622 struct smp_ltk *ltk;
3623
3624 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3625
3626 hci_dev_lock(hdev);
3627
3628 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3629 if (conn == NULL)
3630 goto not_found;
3631
3632 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3633 if (ltk == NULL)
3634 goto not_found;
3635
3636 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3637 cp.handle = cpu_to_le16(conn->handle);
3638
3639 if (ltk->authenticated)
3640 conn->sec_level = BT_SECURITY_HIGH;
3641
3642 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3643
3644 if (ltk->type & HCI_SMP_STK) {
3645 list_del(&ltk->list);
3646 kfree(ltk);
3647 }
3648
3649 hci_dev_unlock(hdev);
3650
3651 return;
3652
3653 not_found:
3654 neg.handle = ev->handle;
3655 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3656 hci_dev_unlock(hdev);
3657 }
3658
3659 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3660 {
3661 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3662
3663 skb_pull(skb, sizeof(*le_ev));
3664
3665 switch (le_ev->subevent) {
3666 case HCI_EV_LE_CONN_COMPLETE:
3667 hci_le_conn_complete_evt(hdev, skb);
3668 break;
3669
3670 case HCI_EV_LE_ADVERTISING_REPORT:
3671 hci_le_adv_report_evt(hdev, skb);
3672 break;
3673
3674 case HCI_EV_LE_LTK_REQ:
3675 hci_le_ltk_request_evt(hdev, skb);
3676 break;
3677
3678 default:
3679 break;
3680 }
3681 }
3682
3683 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3684 {
3685 struct hci_ev_channel_selected *ev = (void *) skb->data;
3686 struct hci_conn *hcon;
3687
3688 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3689
3690 skb_pull(skb, sizeof(*ev));
3691
3692 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3693 if (!hcon)
3694 return;
3695
3696 amp_read_loc_assoc_final_data(hdev, hcon);
3697 }
3698
/* Main HCI event dispatcher.
 *
 * Receives an skb whose data starts at the HCI event header; the
 * header is stripped here so each per-event handler sees only the
 * event parameters.  Consumes (frees) the skb before returning.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the pending command was marked as terminated by this custom
	 * event (rather than Command Complete/Status), complete the
	 * request now with success status.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the per-event handler. */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown/unhandled events are only logged. */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}