/*
 * Bluetooth: remove unneeded hci_conn_hold/put_device()
 * net/bluetooth/hci_event.c
 */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
/* Command Complete handler for HCI_Inquiry_Cancel.
 * On failure, report the failed stop to mgmt; on success, clear the
 * inquiry flag, wake any waiters and mark discovery as stopped.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	/* Clear-then-wake ordering matters for waiters on HCI_INQUIRY */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Inquiry no longer in progress: resume deferred connections */
	hci_conn_check_pending(hdev);
}
60
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85 }
86
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * Nothing to do here beyond the debug trace; no local state is updated.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 * On success, mirror the policy that was sent into the matching
 * connection's cached link_policy.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 2-byte connection handle at the start
		 * of the command parameters, leaving the policy field.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161 {
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166 if (rp->status)
167 return;
168
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
171
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
174 {
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
177
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
183
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
186 }
187
/* Command Complete handler for HCI_Reset.
 * Clears the in-progress reset flag and returns all volatile device
 * state (flags, discovery, TX power, advertising data) to defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Drop any cached advertising data */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;
}
206
/* Command Complete handler for HCI_Write_Local_Name.
 * With mgmt active the result (success or failure) is forwarded there;
 * otherwise the name that was sent is cached locally on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
227
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240
/* Command Complete handler for HCI_Write_Authentication_Enable.
 * Mirrors the enable/disable value that was sent into the HCI_AUTH
 * flag and, when mgmt is active, forwards the result there.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		/* First command parameter byte is the auth enable value */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt is notified on both success and failure */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
264
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266 {
267 __u8 status = *((__u8 *) skb->data);
268 void *sent;
269
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
271
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 if (!sent)
274 return;
275
276 if (!status) {
277 __u8 param = *((__u8 *) sent);
278
279 if (param)
280 set_bit(HCI_ENCRYPT, &hdev->flags);
281 else
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
283 }
284 }
285
/* Command Complete handler for HCI_Write_Scan_Enable.
 * Synchronizes the HCI_ISCAN/HCI_PSCAN flags with the scan mode that
 * was requested, notifies mgmt of discoverable/connectable transitions
 * and (re)arms the discoverable timeout when one is configured.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* The requested scan mode (SCAN_INQUIRY/SCAN_PAGE bits) */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Capture the previous state so mgmt is only told about changes */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Limited discoverability: schedule automatic turn-off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
333
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339
340 if (rp->status)
341 return;
342
343 memcpy(hdev->dev_class, rp->dev_class, 3);
344
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348
/* Command Complete handler for HCI_Write_Class_of_Device.
 * Caches the class that was sent on success; mgmt is notified of the
 * outcome either way.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
370
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 __u16 setting;
375
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377
378 if (rp->status)
379 return;
380
381 setting = __le16_to_cpu(rp->voice_setting);
382
383 if (hdev->voice_setting == setting)
384 return;
385
386 hdev->voice_setting = setting;
387
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389
390 if (hdev->notify)
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 struct sk_buff *skb)
396 {
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
400
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
402
403 if (status)
404 return;
405
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
409
410 setting = get_unaligned_le16(sent);
411
412 if (hdev->voice_setting == setting)
413 return;
414
415 hdev->voice_setting = setting;
416
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 * Mirrors the mode into host_features and either notifies mgmt or, when
 * mgmt is not active, toggles HCI_SSP_ENABLED directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		/* Keep the cached host feature bits in sync */
		if (sent->mode)
			hdev->host_features[0] |= LMP_HOST_SSP;
		else
			hdev->host_features[0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
450
451 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
452 {
453 struct hci_rp_read_local_version *rp = (void *) skb->data;
454
455 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
456
457 if (rp->status)
458 return;
459
460 hdev->hci_ver = rp->hci_ver;
461 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
462 hdev->lmp_ver = rp->lmp_ver;
463 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
464 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
465
466 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
467 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
468 }
469
470 static void hci_cc_read_local_commands(struct hci_dev *hdev,
471 struct sk_buff *skb)
472 {
473 struct hci_rp_read_local_commands *rp = (void *) skb->data;
474
475 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
476
477 if (!rp->status)
478 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
479 }
480
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 * Caches the LMP feature mask and widens the default ACL packet types
 * and (e)SCO capabilities according to what the controller supports.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}
536
537 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
538 struct sk_buff *skb)
539 {
540 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
541
542 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
543
544 if (rp->status)
545 return;
546
547 switch (rp->page) {
548 case 0:
549 memcpy(hdev->features, rp->features, 8);
550 break;
551 case 1:
552 memcpy(hdev->host_features, rp->features, 8);
553 break;
554 }
555 }
556
557 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
558 struct sk_buff *skb)
559 {
560 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
561
562 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
563
564 if (!rp->status)
565 hdev->flow_ctl_mode = rp->mode;
566 }
567
/* Command Complete handler for HCI_Read_Buffer_Size.
 * Caches ACL/SCO MTUs and packet counts and initializes the available
 * packet counters from them.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO buffer sizes; the quirk
	 * substitutes known-good values.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
593
594 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
595 {
596 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
597
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599
600 if (!rp->status)
601 bacpy(&hdev->bdaddr, &rp->bdaddr);
602 }
603
604 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
605 struct sk_buff *skb)
606 {
607 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
608
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610
611 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
612 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
613 hdev->page_scan_window = __le16_to_cpu(rp->window);
614 }
615 }
616
617 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
618 struct sk_buff *skb)
619 {
620 u8 status = *((u8 *) skb->data);
621 struct hci_cp_write_page_scan_activity *sent;
622
623 BT_DBG("%s status 0x%2.2x", hdev->name, status);
624
625 if (status)
626 return;
627
628 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
629 if (!sent)
630 return;
631
632 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
633 hdev->page_scan_window = __le16_to_cpu(sent->window);
634 }
635
636 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
637 struct sk_buff *skb)
638 {
639 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
640
641 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
642
643 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
644 hdev->page_scan_type = rp->type;
645 }
646
647 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
648 struct sk_buff *skb)
649 {
650 u8 status = *((u8 *) skb->data);
651 u8 *type;
652
653 BT_DBG("%s status 0x%2.2x", hdev->name, status);
654
655 if (status)
656 return;
657
658 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
659 if (type)
660 hdev->page_scan_type = *type;
661 }
662
/* Command Complete handler for HCI_Read_Data_Block_Size.
 * Caches the block-based flow control parameters and initializes the
 * available-block counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks are initially free */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
682
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 * Caches the local AMP controller parameters and always answers the
 * pending A2MP Get Info request, even on failure.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
707
708 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
709 struct sk_buff *skb)
710 {
711 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
712 struct amp_assoc *assoc = &hdev->loc_assoc;
713 size_t rem_len, frag_len;
714
715 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716
717 if (rp->status)
718 goto a2mp_rsp;
719
720 frag_len = skb->len - sizeof(*rp);
721 rem_len = __le16_to_cpu(rp->rem_len);
722
723 if (rem_len > frag_len) {
724 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
725
726 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
727 assoc->offset += frag_len;
728
729 /* Read other fragments */
730 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
731
732 return;
733 }
734
735 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
736 assoc->len = assoc->offset + rem_len;
737 assoc->offset = 0;
738
739 a2mp_rsp:
740 /* Send A2MP Rsp when all fragments are received */
741 a2mp_send_getampassoc_rsp(hdev, rp->status);
742 a2mp_send_create_phy_link_req(hdev, rp->status);
743 }
744
745 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
746 struct sk_buff *skb)
747 {
748 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
749
750 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
751
752 if (!rp->status)
753 hdev->inq_tx_power = rp->tx_power;
754 }
755
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 * Forwards the outcome to mgmt and, on success, records the PIN length
 * that was sent on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
783
784 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
785 {
786 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
787
788 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
789
790 hci_dev_lock(hdev);
791
792 if (test_bit(HCI_MGMT, &hdev->dev_flags))
793 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
794 rp->status);
795
796 hci_dev_unlock(hdev);
797 }
798
799 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
800 struct sk_buff *skb)
801 {
802 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
803
804 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
805
806 if (rp->status)
807 return;
808
809 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
810 hdev->le_pkts = rp->le_max_pkt;
811
812 hdev->le_cnt = hdev->le_pkts;
813
814 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
815 }
816
817 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
818 struct sk_buff *skb)
819 {
820 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
821
822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824 if (!rp->status)
825 memcpy(hdev->le_features, rp->features, 8);
826 }
827
828 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
829 struct sk_buff *skb)
830 {
831 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
832
833 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834
835 if (!rp->status)
836 hdev->adv_tx_power = rp->tx_power;
837 }
838
839 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
840 {
841 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
842
843 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
844
845 hci_dev_lock(hdev);
846
847 if (test_bit(HCI_MGMT, &hdev->dev_flags))
848 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
849 rp->status);
850
851 hci_dev_unlock(hdev);
852 }
853
854 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
855 struct sk_buff *skb)
856 {
857 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
858
859 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
860
861 hci_dev_lock(hdev);
862
863 if (test_bit(HCI_MGMT, &hdev->dev_flags))
864 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
865 ACL_LINK, 0, rp->status);
866
867 hci_dev_unlock(hdev);
868 }
869
870 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
871 {
872 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
873
874 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
875
876 hci_dev_lock(hdev);
877
878 if (test_bit(HCI_MGMT, &hdev->dev_flags))
879 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
880 0, rp->status);
881
882 hci_dev_unlock(hdev);
883 }
884
885 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
886 struct sk_buff *skb)
887 {
888 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
889
890 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
891
892 hci_dev_lock(hdev);
893
894 if (test_bit(HCI_MGMT, &hdev->dev_flags))
895 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
896 ACL_LINK, 0, rp->status);
897
898 hci_dev_unlock(hdev);
899 }
900
901 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
902 struct sk_buff *skb)
903 {
904 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
905
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907
908 hci_dev_lock(hdev);
909 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
910 rp->randomizer, rp->status);
911 hci_dev_unlock(hdev);
912 }
913
/* Command Complete handler for HCI_LE_Set_Advertise_Enable.
 * Mirrors the sent enable value into HCI_LE_PERIPHERAL and, outside of
 * device init, refreshes the advertising data.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* The single command parameter byte is the enable value */
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	/* Skip the AD update while the controller is still initializing */
	if (!test_bit(HCI_INIT, &hdev->flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_update_ad(&req);
		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);
}
943
944 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
945 {
946 __u8 status = *((__u8 *) skb->data);
947
948 BT_DBG("%s status 0x%2.2x", hdev->name, status);
949
950 if (status) {
951 hci_dev_lock(hdev);
952 mgmt_start_discovery_failed(hdev, status);
953 hci_dev_unlock(hdev);
954 return;
955 }
956 }
957
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 * Drives the discovery state machine: toggles HCI_LE_SCAN, reports
 * start/stop failures to mgmt and, for interleaved discovery, hands
 * over to the BR/EDR phase when LE scanning finishes.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* Interleaved discovery continues with the BR/EDR phase
		 * after the LE scan portion completes.
		 */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1012
1013 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1014 struct sk_buff *skb)
1015 {
1016 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1017
1018 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1019
1020 if (!rp->status)
1021 hdev->le_white_list_size = rp->size;
1022 }
1023
1024 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1025 struct sk_buff *skb)
1026 {
1027 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030
1031 if (!rp->status)
1032 memcpy(hdev->le_states, rp->le_states, 8);
1033 }
1034
/* Command Complete handler for HCI_Write_LE_Host_Supported.
 * Mirrors the sent LE/simultaneous flags into host_features and, after
 * init with mgmt active, reports the LE enable result to mgmt.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		/* Keep cached host feature bits in sync with the command */
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->host_features[0] |= LMP_HOST_LE_BREDR;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);
}
1063
1064 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1065 struct sk_buff *skb)
1066 {
1067 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1068
1069 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1070 hdev->name, rp->status, rp->phy_handle);
1071
1072 if (rp->status)
1073 return;
1074
1075 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1076 }
1077
/* Command Status handler for HCI_Inquiry.
 * On failure, resume pending connections and report the failed
 * discovery start to mgmt; on success, mark inquiry as running.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
1097
/* Command Status handler for HCI_Create_Connection.
 * On failure, either retries (moving the connection to BT_CONNECT2) or
 * tears the connection down; on success, makes sure an outgoing
 * hci_conn object exists for the address.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c is retried up to two times before the
			 * connection attempt is finally given up on.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Incoming-triggered connects may not have a conn yet */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1137
/* Command Status handler for HCI_Add_SCO_Connection.
 * Only failures need handling: find the SCO link hanging off the ACL
 * connection, confirm the failure to the protocol layer and delete it.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command carries the ACL handle the SCO link belongs to */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1172
1173 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1174 {
1175 struct hci_cp_auth_requested *cp;
1176 struct hci_conn *conn;
1177
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1179
1180 if (!status)
1181 return;
1182
1183 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1184 if (!cp)
1185 return;
1186
1187 hci_dev_lock(hdev);
1188
1189 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1190 if (conn) {
1191 if (conn->state == BT_CONFIG) {
1192 hci_proto_connect_cfm(conn, status);
1193 hci_conn_drop(conn);
1194 }
1195 }
1196
1197 hci_dev_unlock(hdev);
1198 }
1199
1200 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1201 {
1202 struct hci_cp_set_conn_encrypt *cp;
1203 struct hci_conn *conn;
1204
1205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1206
1207 if (!status)
1208 return;
1209
1210 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1211 if (!cp)
1212 return;
1213
1214 hci_dev_lock(hdev);
1215
1216 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1217 if (conn) {
1218 if (conn->state == BT_CONFIG) {
1219 hci_proto_connect_cfm(conn, status);
1220 hci_conn_drop(conn);
1221 }
1222 }
1223
1224 hci_dev_unlock(hdev);
1225 }
1226
1227 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1228 struct hci_conn *conn)
1229 {
1230 if (conn->state != BT_CONFIG || !conn->out)
1231 return 0;
1232
1233 if (conn->pending_sec_level == BT_SECURITY_SDP)
1234 return 0;
1235
1236 /* Only request authentication for SSP connections or non-SSP
1237 * devices with sec_level HIGH or if MITM protection is requested */
1238 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1239 conn->pending_sec_level != BT_SECURITY_HIGH)
1240 return 0;
1241
1242 return 1;
1243 }
1244
1245 static int hci_resolve_name(struct hci_dev *hdev,
1246 struct inquiry_entry *e)
1247 {
1248 struct hci_cp_remote_name_req cp;
1249
1250 memset(&cp, 0, sizeof(cp));
1251
1252 bacpy(&cp.bdaddr, &e->data.bdaddr);
1253 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1254 cp.pscan_mode = e->data.pscan_mode;
1255 cp.clock_offset = e->data.clock_offset;
1256
1257 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1258 }
1259
1260 static bool hci_resolve_next_name(struct hci_dev *hdev)
1261 {
1262 struct discovery_state *discov = &hdev->discovery;
1263 struct inquiry_entry *e;
1264
1265 if (list_empty(&discov->resolve))
1266 return false;
1267
1268 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1269 if (!e)
1270 return false;
1271
1272 if (hci_resolve_name(hdev, e) == 0) {
1273 e->name_state = NAME_PENDING;
1274 return true;
1275 }
1276
1277 return false;
1278 }
1279
/* Process the outcome of a name request for @bdaddr and advance the
 * discovery state machine.
 *
 * @conn may be NULL. @name is NULL when resolution failed; otherwise it
 * points to @name_len bytes of remote name. Must be called with the
 * hdev lock held (all callers hold it).
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Report the mgmt "connected" event exactly once per connection;
	 * the test_and_set_bit makes this idempotent.
	 */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested while resolving: finish immediately. */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* If another name request was started, stay in RESOLVING. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1322
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only a failed command is handled here: the pending-name bookkeeping
 * is cleared (under mgmt) and, if the connection was waiting on the
 * name before authenticating, authentication is kicked off anyway.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report failure (NULL name) so discovery can move on. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1361
1362 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1363 {
1364 struct hci_cp_read_remote_features *cp;
1365 struct hci_conn *conn;
1366
1367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1368
1369 if (!status)
1370 return;
1371
1372 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1373 if (!cp)
1374 return;
1375
1376 hci_dev_lock(hdev);
1377
1378 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1379 if (conn) {
1380 if (conn->state == BT_CONFIG) {
1381 hci_proto_connect_cfm(conn, status);
1382 hci_conn_drop(conn);
1383 }
1384 }
1385
1386 hci_dev_unlock(hdev);
1387 }
1388
1389 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1390 {
1391 struct hci_cp_read_remote_ext_features *cp;
1392 struct hci_conn *conn;
1393
1394 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1395
1396 if (!status)
1397 return;
1398
1399 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1400 if (!cp)
1401 return;
1402
1403 hci_dev_lock(hdev);
1404
1405 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1406 if (conn) {
1407 if (conn->state == BT_CONFIG) {
1408 hci_proto_connect_cfm(conn, status);
1409 hci_conn_drop(conn);
1410 }
1411 }
1412
1413 hci_dev_unlock(hdev);
1414 }
1415
1416 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1417 {
1418 struct hci_cp_setup_sync_conn *cp;
1419 struct hci_conn *acl, *sco;
1420 __u16 handle;
1421
1422 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1423
1424 if (!status)
1425 return;
1426
1427 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1428 if (!cp)
1429 return;
1430
1431 handle = __le16_to_cpu(cp->handle);
1432
1433 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1434
1435 hci_dev_lock(hdev);
1436
1437 acl = hci_conn_hash_lookup_handle(hdev, handle);
1438 if (acl) {
1439 sco = acl->link;
1440 if (sco) {
1441 sco->state = BT_CLOSED;
1442
1443 hci_proto_connect_cfm(sco, status);
1444 hci_conn_del(sco);
1445 }
1446 }
1447
1448 hci_dev_unlock(hdev);
1449 }
1450
1451 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1452 {
1453 struct hci_cp_sniff_mode *cp;
1454 struct hci_conn *conn;
1455
1456 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1457
1458 if (!status)
1459 return;
1460
1461 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1462 if (!cp)
1463 return;
1464
1465 hci_dev_lock(hdev);
1466
1467 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1468 if (conn) {
1469 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1470
1471 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1472 hci_sco_setup(conn, status);
1473 }
1474
1475 hci_dev_unlock(hdev);
1476 }
1477
1478 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1479 {
1480 struct hci_cp_exit_sniff_mode *cp;
1481 struct hci_conn *conn;
1482
1483 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1484
1485 if (!status)
1486 return;
1487
1488 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1489 if (!cp)
1490 return;
1491
1492 hci_dev_lock(hdev);
1493
1494 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1495 if (conn) {
1496 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1497
1498 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1499 hci_sco_setup(conn, status);
1500 }
1501
1502 hci_dev_unlock(hdev);
1503 }
1504
1505 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1506 {
1507 struct hci_cp_disconnect *cp;
1508 struct hci_conn *conn;
1509
1510 if (!status)
1511 return;
1512
1513 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1514 if (!cp)
1515 return;
1516
1517 hci_dev_lock(hdev);
1518
1519 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1520 if (conn)
1521 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1522 conn->dst_type, status);
1523
1524 hci_dev_unlock(hdev);
1525 }
1526
1527 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1528 {
1529 struct hci_conn *conn;
1530
1531 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1532
1533 if (status) {
1534 hci_dev_lock(hdev);
1535
1536 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1537 if (!conn) {
1538 hci_dev_unlock(hdev);
1539 return;
1540 }
1541
1542 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1543
1544 conn->state = BT_CLOSED;
1545 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1546 conn->dst_type, status);
1547 hci_proto_connect_cfm(conn, status);
1548 hci_conn_del(conn);
1549
1550 hci_dev_unlock(hdev);
1551 }
1552 }
1553
1554 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1555 {
1556 struct hci_cp_create_phy_link *cp;
1557
1558 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1559
1560 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1561 if (!cp)
1562 return;
1563
1564 hci_dev_lock(hdev);
1565
1566 if (status) {
1567 struct hci_conn *hcon;
1568
1569 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1570 if (hcon)
1571 hci_conn_del(hcon);
1572 } else {
1573 amp_write_remote_assoc(hdev, cp->phy_handle);
1574 }
1575
1576 hci_dev_unlock(hdev);
1577 }
1578
1579 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1580 {
1581 struct hci_cp_accept_phy_link *cp;
1582
1583 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1584
1585 if (status)
1586 return;
1587
1588 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1589 if (!cp)
1590 return;
1591
1592 amp_write_remote_assoc(hdev, cp->phy_handle);
1593 }
1594
/* Handle the Inquiry Complete event.
 *
 * Clears HCI_INQUIRY, wakes any waiter blocked on that bit and, when
 * mgmt is in control, either starts resolving names of discovered
 * devices or marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Pair the barrier documented by wake_up_bit() with the
	 * clear above before waking the waiter.
	 */
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution for the first entry still needing a
	 * name; if that fails, discovery is over.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1635
1636 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1637 {
1638 struct inquiry_data data;
1639 struct inquiry_info *info = (void *) (skb->data + 1);
1640 int num_rsp = *((__u8 *) skb->data);
1641
1642 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1643
1644 if (!num_rsp)
1645 return;
1646
1647 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1648 return;
1649
1650 hci_dev_lock(hdev);
1651
1652 for (; num_rsp; num_rsp--, info++) {
1653 bool name_known, ssp;
1654
1655 bacpy(&data.bdaddr, &info->bdaddr);
1656 data.pscan_rep_mode = info->pscan_rep_mode;
1657 data.pscan_period_mode = info->pscan_period_mode;
1658 data.pscan_mode = info->pscan_mode;
1659 memcpy(data.dev_class, info->dev_class, 3);
1660 data.clock_offset = info->clock_offset;
1661 data.rssi = 0x00;
1662 data.ssp_mode = 0x00;
1663
1664 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1665 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1666 info->dev_class, 0, !name_known, ssp, NULL,
1667 0);
1668 }
1669
1670 hci_dev_unlock(hdev);
1671 }
1672
/* Handle the Connection Complete event for ACL and SCO links.
 *
 * On success the connection moves to BT_CONFIG (ACL, with follow-up
 * feature read) or BT_CONNECTED (SCO); on failure the connection is
 * reported to mgmt/protocol layers and deleted.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Some controllers report an eSCO attempt that fell back
		 * to SCO with link_type SCO; retry the lookup as eSCO
		 * and downgrade the connection type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give incoming legacy (non-SSP) links without a
			 * stored key a longer timeout for pairing.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	/* An ACL outcome may unblock a queued SCO setup. */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links confirm later, after remote features. */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1754
1755 void hci_conn_accept(struct hci_conn *conn, int mask)
1756 {
1757 struct hci_dev *hdev = conn->hdev;
1758
1759 BT_DBG("conn %p", conn);
1760
1761 conn->state = BT_CONFIG;
1762
1763 if (!lmp_esco_capable(hdev)) {
1764 struct hci_cp_accept_conn_req cp;
1765
1766 bacpy(&cp.bdaddr, &conn->dst);
1767
1768 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1769 cp.role = 0x00; /* Become master */
1770 else
1771 cp.role = 0x01; /* Remain slave */
1772
1773 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
1774 } else /* lmp_esco_capable(hdev)) */ {
1775 struct hci_cp_accept_sync_conn_req cp;
1776
1777 bacpy(&cp.bdaddr, &conn->dst);
1778 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1779
1780 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1781 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1782 cp.max_latency = __constant_cpu_to_le16(0xffff);
1783 cp.content_format = cpu_to_le16(hdev->voice_setting);
1784 cp.retrans_effort = 0xff;
1785
1786 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1787 sizeof(cp), &cp);
1788 }
1789 }
1790
/* Handle an incoming Connection Request event.
 *
 * Consults the protocol layers and the blacklist; accepted requests
 * get (or reuse) a connection object and an accept command is sent,
 * unless a protocol asked to defer the decision. Rejected requests are
 * answered with a Reject Connection Request command.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let protocols veto/adjust acceptance and request deferral. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Keep the cached device class up to date. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* 64 kbit/s in each direction, no latency limit. */
			cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* A protocol deferred the decision; it will
			 * accept or reject via hci_conn_accept() later.
			 */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1872
1873 static u8 hci_to_mgmt_reason(u8 err)
1874 {
1875 switch (err) {
1876 case HCI_ERROR_CONNECTION_TIMEOUT:
1877 return MGMT_DEV_DISCONN_TIMEOUT;
1878 case HCI_ERROR_REMOTE_USER_TERM:
1879 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1880 case HCI_ERROR_REMOTE_POWER_OFF:
1881 return MGMT_DEV_DISCONN_REMOTE;
1882 case HCI_ERROR_LOCAL_HOST_TERM:
1883 return MGMT_DEV_DISCONN_LOCAL_HOST;
1884 default:
1885 return MGMT_DEV_DISCONN_UNKNOWN;
1886 }
1887 }
1888
/* Handle the Disconnection Complete event.
 *
 * On success the connection is closed, mgmt is notified with a mapped
 * reason, the stored link key is removed if flagged, and the
 * connection object is deleted. On failure mgmt is told the requested
 * disconnect failed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only notify mgmt for links it was told about, and only once
	 * per connection (the flag is cleared here).
	 */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		/* Drop one-shot link keys once the link goes down. */
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1928
/* Handle the Authentication Complete event.
 *
 * Updates link mode and security level on success, reports failures to
 * mgmt, clears the pending-auth flags and continues connection setup
 * (possibly by requesting encryption) depending on the link state.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy devices cannot re-authenticate; don't raise the
		 * security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links must be encrypted before completing
			 * the connection setup.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold + drop resets the disconnect timer with the
		 * standard timeout.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1994
/* Handle the Remote Name Request Complete event.
 *
 * Feeds the resolved (or failed) name into the discovery/mgmt pending
 * name machinery and, for outgoing connections waiting on the name,
 * proceeds with authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Without mgmt there is no discovery bookkeeping to update. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2033
/* Handle the Encryption Change event.
 *
 * Updates the link mode to reflect the new encryption state, tears the
 * link down when encryption failed on an established connection, and
 * completes connection setup for links still in BT_CONFIG.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* Failing to change encryption on a live link is treated
		 * as an authentication failure: disconnect.
		 */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2076
2077 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2078 struct sk_buff *skb)
2079 {
2080 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2081 struct hci_conn *conn;
2082
2083 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2084
2085 hci_dev_lock(hdev);
2086
2087 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2088 if (conn) {
2089 if (!ev->status)
2090 conn->link_mode |= HCI_LM_SECURE;
2091
2092 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2093
2094 hci_key_change_cfm(conn, ev->status);
2095 }
2096
2097 hci_dev_unlock(hdev);
2098 }
2099
/* Handle the Read Remote Supported Features Complete event.
 *
 * Stores the feature page and continues connection setup: either read
 * the extended features (SSP peers), request the remote name, or
 * finish the setup and confirm the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* When both sides support SSP, fetch extended features page 1
	 * before going any further; setup continues from that event.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting the device to mgmt;
	 * otherwise report it connected right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2149
/* Handle the Command Complete event: dispatch to the per-opcode
 * hci_cc_* handler, complete any synchronous request waiting on this
 * opcode, and restart the command queue when the controller signals it
 * can accept more commands.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the status. */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed; the command timeout no longer
	 * applies.
	 */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd tells how many more commands the controller accepts;
	 * kick the queue unless a reset is in flight.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2385
/* Handle the Command Status event: dispatch to the per-opcode hci_cs_*
 * handler, complete a waiting request on failure (or when no further
 * event is expected), and restart the command queue when the
 * controller can accept more commands.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command got its status; the command timeout no longer
	 * applies.
	 */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the request now on failure, or when the sent command
	 * is not waiting for a specific follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd tells how many more commands the controller accepts;
	 * kick the queue unless a reset is in flight.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2474
2475 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2476 {
2477 struct hci_ev_role_change *ev = (void *) skb->data;
2478 struct hci_conn *conn;
2479
2480 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2481
2482 hci_dev_lock(hdev);
2483
2484 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2485 if (conn) {
2486 if (!ev->status) {
2487 if (ev->role)
2488 conn->link_mode &= ~HCI_LM_MASTER;
2489 else
2490 conn->link_mode |= HCI_LM_MASTER;
2491 }
2492
2493 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2494
2495 hci_role_switch_cfm(conn, ev->status, ev->role);
2496 }
2497
2498 hci_dev_unlock(hdev);
2499 }
2500
/* Handle the Number Of Completed Packets event (packet-based flow
 * control): credit back the per-link and per-device transmit quotas
 * for every reported handle and re-schedule the TX work queue.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the length before trusting num_hndl; the first check
	 * guarantees ev->num_hndl itself is within the buffer before the
	 * second operand reads it.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		/* Handles for already-gone connections are simply skipped */
		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the right quota pool, clamped to
		 * the controller-advertised maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_pkts == 0) share the ACL pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed credits may unblock queued traffic */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2566
2567 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2568 __u16 handle)
2569 {
2570 struct hci_chan *chan;
2571
2572 switch (hdev->dev_type) {
2573 case HCI_BREDR:
2574 return hci_conn_hash_lookup_handle(hdev, handle);
2575 case HCI_AMP:
2576 chan = hci_chan_lookup_handle(hdev, handle);
2577 if (chan)
2578 return chan->conn;
2579 break;
2580 default:
2581 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2582 break;
2583 }
2584
2585 return NULL;
2586 }
2587
/* Handle the Number Of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers): credit back the block quota for
 * every reported handle and re-schedule the TX work queue.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate length before trusting num_hndl; the first check keeps
	 * the second operand's read of ev->num_hndl in bounds.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Transport-aware lookup: handle may name a channel on AMP */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Return blocks to the shared pool, clamped to the
			 * controller-advertised total.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed blocks may unblock queued traffic */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2637
/* Handle the HCI Mode Change event: record the link's new power mode
 * (active/sniff) and interval, maintain the power-save flag, and kick
 * off any SCO setup that was deferred until the mode change completed.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		/* Only track power-save state for mode changes that were
		 * not initiated locally (no MODE_CHANGE_PEND set).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* SCO setup deferred until the ACL left sniff mode */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
2666
/* Handle the HCI PIN Code Request event: extend the disconnect
 * timeout for the pairing attempt, then either refuse the request
 * (device not pairable) or forward it to user space via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* NOTE(review): the hold/drop pair around setting
		 * disc_timeout looks intended to re-arm the idle
		 * disconnect timer with the longer pairing timeout —
		 * confirm against hci_conn_hold()/hci_conn_drop().
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		/* Not pairable: reject the PIN request outright */
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit (secure) PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2703
/* Handle the HCI Link Key Request event: look up a stored link key
 * for the peer and reply with it, unless security policy (debug keys,
 * unauthenticated keys, short PINs at high security) forbids its use,
 * in which case a negative reply is sent so pairing starts afresh.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage handled elsewhere when link-key support is off */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable when explicitly enabled */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key cannot satisfy a connection that
		 * requires MITM protection (auth_type bit 0).
		 */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* High security demands a combination key from a 16-digit
		 * PIN; shorter PINs are treated as insufficient.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No usable key: force the controller to start pairing */
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2766
/* Handle the HCI Link Key Notification event: record the new key's
 * type on the connection and store the key when link-key storage is
 * enabled.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* NOTE(review): hold/drop here appears to re-arm the idle
		 * disconnect timer with the normal timeout once pairing
		 * produced a key — confirm against hci_conn_hold()/drop().
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A "changed combination key" keeps the original type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2795
2796 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2797 {
2798 struct hci_ev_clock_offset *ev = (void *) skb->data;
2799 struct hci_conn *conn;
2800
2801 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2802
2803 hci_dev_lock(hdev);
2804
2805 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2806 if (conn && !ev->status) {
2807 struct inquiry_entry *ie;
2808
2809 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2810 if (ie) {
2811 ie->data.clock_offset = ev->clock_offset;
2812 ie->timestamp = jiffies;
2813 }
2814 }
2815
2816 hci_dev_unlock(hdev);
2817 }
2818
2819 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2820 {
2821 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2822 struct hci_conn *conn;
2823
2824 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2825
2826 hci_dev_lock(hdev);
2827
2828 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2829 if (conn && !ev->status)
2830 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2831
2832 hci_dev_unlock(hdev);
2833 }
2834
2835 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2836 {
2837 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2838 struct inquiry_entry *ie;
2839
2840 BT_DBG("%s", hdev->name);
2841
2842 hci_dev_lock(hdev);
2843
2844 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2845 if (ie) {
2846 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2847 ie->timestamp = jiffies;
2848 }
2849
2850 hci_dev_unlock(hdev);
2851 }
2852
/* Handle the Inquiry Result With RSSI event.  Two wire formats exist:
 * one with an extra page-scan-mode byte per entry and one without; the
 * total length per response disambiguates them.  Each entry updates the
 * inquiry cache and is reported to mgmt as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded to user space */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Per-entry size tells which of the two formats the controller
	 * used (with or without the pscan_mode field).
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2912
/* Handle the Read Remote Extended Features Complete event: note the
 * peer's SSP host support (page 1), then continue connection setup —
 * request the remote name or report the device to mgmt, and finish the
 * connection if no outgoing authentication is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Page 1 carries the host-supported feature bits, including SSP */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	/* Setup continuation only applies while still configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2961
/* Handle the Synchronous Connection Complete event (SCO/eSCO).  On
 * success the connection goes to BT_CONNECTED; on a known class of
 * eSCO negotiation failures a single retry with downgraded packet
 * types is attempted before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* The controller may have downgraded an eSCO request to
		 * plain SCO; fall back to looking up the pending eSCO
		 * connection and retag it.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
		/* Retry once with reduced packet types for outgoing
		 * connections; attempt counts the setup tries.
		 */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3016
/* Handle the Extended Inquiry Result event: for each response, update
 * the inquiry cache (using the EIR data to decide whether the name is
 * already known) and report the device plus its EIR payload to mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded to user space */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* A complete-name EIR field means no separate remote name
		 * request is needed.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
3064
/* Handle the Encryption Key Refresh Complete event: promote the
 * pending security level on success, and finish connection setup or
 * notify authentication listeners depending on the connection state.
 * On failure of an established link the connection is torn down.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A refresh failure on a live link means encryption is broken:
	 * disconnect with an authentication failure.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* NOTE(review): hold/drop appears to re-arm the disconnect
		 * timer with the normal timeout — confirm against
		 * hci_conn_hold()/hci_conn_drop() semantics.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3108
3109 static u8 hci_get_auth_req(struct hci_conn *conn)
3110 {
3111 /* If remote requests dedicated bonding follow that lead */
3112 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3113 /* If both remote and local IO capabilities allow MITM
3114 * protection then require it, otherwise don't */
3115 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3116 return 0x02;
3117 else
3118 return 0x03;
3119 }
3120
3121 /* If remote requests no-bonding follow that lead */
3122 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3123 return conn->remote_auth | (conn->auth_type & 0x01);
3124
3125 return conn->auth_type;
3126 }
3127
/* Handle the HCI IO Capability Request event: reply with our IO
 * capabilities and authentication requirements when pairing is
 * allowed, or send a negative reply otherwise.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep a reference for the duration of the pairing procedure;
	 * intentionally not dropped in this function.
	 */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Pairing allowed if we are pairable, or the remote only asks
	 * for no-bonding (general availability).
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data presence when we have OOB data for
		 * the peer and this is an outgoing connection or the
		 * remote indicated OOB.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3179
3180 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3181 {
3182 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3183 struct hci_conn *conn;
3184
3185 BT_DBG("%s", hdev->name);
3186
3187 hci_dev_lock(hdev);
3188
3189 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3190 if (!conn)
3191 goto unlock;
3192
3193 conn->remote_cap = ev->capability;
3194 conn->remote_auth = ev->authentication;
3195 if (ev->oob_data)
3196 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3197
3198 unlock:
3199 hci_dev_unlock(hdev);
3200 }
3201
/* Handle the HCI User Confirmation Request event: auto-reject when
 * our MITM requirement cannot be met, auto-accept (optionally after a
 * configurable delay) when no side needs MITM, and otherwise forward
 * the numeric-comparison request to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Delayed auto-accept: the confirm reply is sent by the
		 * auto-accept timer instead of immediately.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3269
3270 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3271 struct sk_buff *skb)
3272 {
3273 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3274
3275 BT_DBG("%s", hdev->name);
3276
3277 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3278 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3279 }
3280
3281 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3282 struct sk_buff *skb)
3283 {
3284 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3285 struct hci_conn *conn;
3286
3287 BT_DBG("%s", hdev->name);
3288
3289 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3290 if (!conn)
3291 return;
3292
3293 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3294 conn->passkey_entered = 0;
3295
3296 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3297 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3298 conn->dst_type, conn->passkey_notify,
3299 conn->passkey_entered);
3300 }
3301
/* Handle the HCI Keypress Notification event: track how many passkey
 * digits the remote side has entered so far and relay progress to
 * user space via mgmt.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		/* Entry (re)started: reset counter, no notification yet */
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		/* Completion is not reported, only digit progress */
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
3339
/* Handle the HCI Simple Pairing Complete event: report a pairing
 * failure to mgmt (unless a regular auth-complete event will already
 * do so) and release the reference taken when pairing started.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	/* Balances the hold taken in hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3368
/* Handle the HCI Remote Host Supported Features Notification event:
 * record the peer's SSP host support in the inquiry cache entry.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
3385
/* Handle the HCI Remote OOB Data Request event: reply with the stored
 * out-of-band hash/randomizer for the peer if we have them, otherwise
 * send a negative reply so pairing proceeds without OOB data.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only stored when the management interface is used */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
			     &cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
			     &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3420
/* Handle the HCI Physical Link Complete event (AMP): on success mark
 * the physical link connected, copy the peer address from the
 * controlling BR/EDR connection and confirm to the AMP manager; on
 * failure tear the connection down.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The BR/EDR connection that negotiated this AMP link */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* NOTE(review): hold/drop appears to arm the disconnect timer
	 * with the normal timeout — confirm against hci_conn_hold()/
	 * hci_conn_drop() semantics.
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
3459
/* Handle the HCI Logical Link Complete event (AMP): create the AMP
 * channel for the new logical link and, when an L2CAP channel is
 * waiting on the AMP manager, confirm the logical link to it.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the channel's MTU to the AMP controller's block
		 * MTU and hand the logical link to L2CAP.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3497
/* Handle the HCI Disconnection Logical Link Complete event (AMP):
 * on success, destroy the channel associated with the logical link.
 */
static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	/* A failed disconnect leaves the logical link in place */
	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}
3521
/* Handle the HCI Disconnection Physical Link Complete event (AMP):
 * on success, close and delete the physical link's connection.
 */
static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	/* A failed disconnect leaves the physical link in place */
	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}
3543
/* Handle the LE Connection Complete event: match it to the pending
 * LE connection (creating one for incoming links), report failures to
 * mgmt, and on success mark the connection established and notify
 * mgmt and the protocol layers.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing attempt sits in BT_CONNECT; with no pending
	 * attempt this is an incoming connection, so create the
	 * hci_conn here.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3593
/* Handle an LE Advertising Report event.  One event may batch several
 * reports; each is a variable-length hci_ev_le_advertising_info
 * followed by a single trailing RSSI byte.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];
	s8 rssi;

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		/* The RSSI byte sits immediately after the adv data. */
		rssi = ev->data[ev->length];
		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
				  NULL, rssi, 0, 1, ev->data, ev->length);

		/* Advance past this report: header + data + RSSI byte.
		 * NOTE(review): num_reports and ev->length come from the
		 * controller and are not checked against skb->len here —
		 * confirm the caller guarantees buffer bounds, otherwise
		 * a malformed event can walk past the skb.
		 */
		ptr += sizeof(*ev) + ev->length + 1;
	}
}
3610
3611 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3612 {
3613 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3614 struct hci_cp_le_ltk_reply cp;
3615 struct hci_cp_le_ltk_neg_reply neg;
3616 struct hci_conn *conn;
3617 struct smp_ltk *ltk;
3618
3619 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3620
3621 hci_dev_lock(hdev);
3622
3623 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3624 if (conn == NULL)
3625 goto not_found;
3626
3627 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3628 if (ltk == NULL)
3629 goto not_found;
3630
3631 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3632 cp.handle = cpu_to_le16(conn->handle);
3633
3634 if (ltk->authenticated)
3635 conn->sec_level = BT_SECURITY_HIGH;
3636
3637 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3638
3639 if (ltk->type & HCI_SMP_STK) {
3640 list_del(&ltk->list);
3641 kfree(ltk);
3642 }
3643
3644 hci_dev_unlock(hdev);
3645
3646 return;
3647
3648 not_found:
3649 neg.handle = ev->handle;
3650 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3651 hci_dev_unlock(hdev);
3652 }
3653
3654 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3655 {
3656 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3657
3658 skb_pull(skb, sizeof(*le_ev));
3659
3660 switch (le_ev->subevent) {
3661 case HCI_EV_LE_CONN_COMPLETE:
3662 hci_le_conn_complete_evt(hdev, skb);
3663 break;
3664
3665 case HCI_EV_LE_ADVERTISING_REPORT:
3666 hci_le_adv_report_evt(hdev, skb);
3667 break;
3668
3669 case HCI_EV_LE_LTK_REQ:
3670 hci_le_ltk_request_evt(hdev, skb);
3671 break;
3672
3673 default:
3674 break;
3675 }
3676 }
3677
3678 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3679 {
3680 struct hci_ev_channel_selected *ev = (void *) skb->data;
3681 struct hci_conn *hcon;
3682
3683 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3684
3685 skb_pull(skb, sizeof(*ev));
3686
3687 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3688 if (!hcon)
3689 return;
3690
3691 amp_read_loc_assoc_final_data(hdev, hcon);
3692 }
3693
/* Main entry point for incoming HCI event packets.
 *
 * Optionally stashes a clone of the event for a pending synchronous
 * request, completes a sent command waiting on this event, then
 * dispatches to the per-event handler.  Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Strip the event header; handlers expect only the payload. */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the outstanding command asked to be completed by this
	 * specific event, mark its request complete now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch on the event code; unknown events are just logged. */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}