net/bluetooth/hci_event.c (GitHub/mt8127/android_kernel_alcatel_ttab.git, 84edacbc14a10b2d929d1bc978a211572a2aa3b5)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
37 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
38 {
39 __u8 status = *((__u8 *) skb->data);
40
41 BT_DBG("%s status 0x%2.2x", hdev->name, status);
42
43 if (status) {
44 hci_dev_lock(hdev);
45 mgmt_stop_discovery_failed(hdev, status);
46 hci_dev_unlock(hdev);
47 return;
48 }
49
50 clear_bit(HCI_INQUIRY, &hdev->flags);
51
52 hci_dev_lock(hdev);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54 hci_dev_unlock(hdev);
55
56 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
57
58 hci_conn_check_pending(hdev);
59 }
60
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85 }
86
87 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 struct sk_buff *skb)
89 {
90 BT_DBG("%s", hdev->name);
91 }
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
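	/* The sent Write_Link_Policy_Settings params are the 2-octet handle
	 * followed by the 2-octet policy, so skip the handle here. */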
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157 }
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161 {
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166 if (rp->status)
167 return;
168
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
171
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
174 {
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
177
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
183
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 /* Reset all non-persistent flags */
197 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
198
199 hdev->discovery.state = DISCOVERY_STOPPED;
200 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
201 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
202
203 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
204 hdev->adv_data_len = 0;
205 }
206
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
208 {
209 __u8 status = *((__u8 *) skb->data);
210 void *sent;
211
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
213
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
215 if (!sent)
216 return;
217
218 hci_dev_lock(hdev);
219
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
222 else if (!status)
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
224
225 hci_dev_unlock(hdev);
226 }
227
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
259 }
260
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
263 }
264
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266 {
267 __u8 status = *((__u8 *) skb->data);
268 void *sent;
269
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
271
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 if (!sent)
274 return;
275
276 if (!status) {
277 __u8 param = *((__u8 *) sent);
278
279 if (param)
280 set_bit(HCI_ENCRYPT, &hdev->flags);
281 else
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
283 }
284 }
285
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287 {
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
290 void *sent;
291
292 BT_DBG("%s status 0x%2.2x", hdev->name, status);
293
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
295 if (!sent)
296 return;
297
298 param = *((__u8 *) sent);
299
300 hci_dev_lock(hdev);
301
302 if (status) {
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
305 goto done;
306 }
307
308 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
309 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
310
311 if (param & SCAN_INQUIRY) {
312 set_bit(HCI_ISCAN, &hdev->flags);
313 if (!old_iscan)
314 mgmt_discoverable(hdev, 1);
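	/* Limited-time discoverable: queue the delayed work that turns
	 * discoverable mode back off when the timeout expires. */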
315 if (hdev->discov_timeout > 0) {
316 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
317 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
318 to);
319 }
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
322
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
325 if (!old_pscan)
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
329
330 done:
331 hci_dev_unlock(hdev);
332 }
333
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339
340 if (rp->status)
341 return;
342
343 memcpy(hdev->dev_class, rp->dev_class, 3);
344
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 __u8 status = *((__u8 *) skb->data);
352 void *sent;
353
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 if (!sent)
358 return;
359
360 hci_dev_lock(hdev);
361
362 if (status == 0)
363 memcpy(hdev->dev_class, sent, 3);
364
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
367
368 hci_dev_unlock(hdev);
369 }
370
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 __u16 setting;
375
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377
378 if (rp->status)
379 return;
380
381 setting = __le16_to_cpu(rp->voice_setting);
382
383 if (hdev->voice_setting == setting)
384 return;
385
386 hdev->voice_setting = setting;
387
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389
390 if (hdev->notify)
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 struct sk_buff *skb)
396 {
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
400
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
402
403 if (status)
404 return;
405
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
409
410 setting = get_unaligned_le16(sent);
411
412 if (hdev->voice_setting == setting)
413 return;
414
415 hdev->voice_setting = setting;
416
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422
423 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
424 {
425 __u8 status = *((__u8 *) skb->data);
426 struct hci_cp_write_ssp_mode *sent;
427
428 BT_DBG("%s status 0x%2.2x", hdev->name, status);
429
430 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
431 if (!sent)
432 return;
433
434 if (!status) {
435 if (sent->mode)
436 hdev->host_features[0] |= LMP_HOST_SSP;
437 else
438 hdev->host_features[0] &= ~LMP_HOST_SSP;
439 }
440
441 if (test_bit(HCI_MGMT, &hdev->dev_flags))
442 mgmt_ssp_enable_complete(hdev, sent->mode, status);
443 else if (!status) {
444 if (sent->mode)
445 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
446 else
447 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
448 }
449 }
450
451 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
452 {
453 struct hci_rp_read_local_version *rp = (void *) skb->data;
454
455 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
456
457 if (rp->status)
458 return;
459
460 hdev->hci_ver = rp->hci_ver;
461 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
462 hdev->lmp_ver = rp->lmp_ver;
463 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
464 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
465
466 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
467 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
468 }
469
470 static void hci_cc_read_local_commands(struct hci_dev *hdev,
471 struct sk_buff *skb)
472 {
473 struct hci_rp_read_local_commands *rp = (void *) skb->data;
474
475 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
476
477 if (!rp->status)
478 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
479 }
480
481 static void hci_cc_read_local_features(struct hci_dev *hdev,
482 struct sk_buff *skb)
483 {
484 struct hci_rp_read_local_features *rp = (void *) skb->data;
485
486 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
487
488 if (rp->status)
489 return;
490
491 memcpy(hdev->features, rp->features, 8);
492
493 /* Adjust default settings according to features
494 * supported by device. */
495
496 if (hdev->features[0] & LMP_3SLOT)
497 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
498
499 if (hdev->features[0] & LMP_5SLOT)
500 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
501
502 if (hdev->features[1] & LMP_HV2) {
503 hdev->pkt_type |= (HCI_HV2);
504 hdev->esco_type |= (ESCO_HV2);
505 }
506
507 if (hdev->features[1] & LMP_HV3) {
508 hdev->pkt_type |= (HCI_HV3);
509 hdev->esco_type |= (ESCO_HV3);
510 }
511
512 if (lmp_esco_capable(hdev))
513 hdev->esco_type |= (ESCO_EV3);
514
515 if (hdev->features[4] & LMP_EV4)
516 hdev->esco_type |= (ESCO_EV4);
517
518 if (hdev->features[4] & LMP_EV5)
519 hdev->esco_type |= (ESCO_EV5);
520
521 if (hdev->features[5] & LMP_EDR_ESCO_2M)
522 hdev->esco_type |= (ESCO_2EV3);
523
524 if (hdev->features[5] & LMP_EDR_ESCO_3M)
525 hdev->esco_type |= (ESCO_3EV3);
526
527 if (hdev->features[5] & LMP_EDR_3S_ESCO)
528 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
529
530 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
531 hdev->features[0], hdev->features[1],
532 hdev->features[2], hdev->features[3],
533 hdev->features[4], hdev->features[5],
534 hdev->features[6], hdev->features[7]);
535 }
536
537 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
538 struct sk_buff *skb)
539 {
540 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
541
542 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
543
544 if (rp->status)
545 return;
546
547 switch (rp->page) {
548 case 0:
549 memcpy(hdev->features, rp->features, 8);
550 break;
551 case 1:
552 memcpy(hdev->host_features, rp->features, 8);
553 break;
554 }
555 }
556
557 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
558 struct sk_buff *skb)
559 {
560 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
561
562 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
563
564 if (!rp->status)
565 hdev->flow_ctl_mode = rp->mode;
566 }
567
568 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
569 {
570 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
571
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573
574 if (rp->status)
575 return;
576
577 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
578 hdev->sco_mtu = rp->sco_mtu;
579 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
580 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
581
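	/* Controllers with this quirk report unusable SCO buffer values,
	 * so substitute known-good defaults. */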
582 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
583 hdev->sco_mtu = 64;
584 hdev->sco_pkts = 8;
585 }
586
587 hdev->acl_cnt = hdev->acl_pkts;
588 hdev->sco_cnt = hdev->sco_pkts;
589
590 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
591 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
592 }
593
594 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
595 {
596 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
597
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599
600 if (!rp->status)
601 bacpy(&hdev->bdaddr, &rp->bdaddr);
602 }
603
604 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
605 struct sk_buff *skb)
606 {
607 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
608
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610
611 if (rp->status)
612 return;
613
614 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
615 hdev->block_len = __le16_to_cpu(rp->block_len);
616 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
617
618 hdev->block_cnt = hdev->num_blocks;
619
620 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
621 hdev->block_cnt, hdev->block_len);
622 }
623
624 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
625 struct sk_buff *skb)
626 {
627 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
628
629 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
630
631 if (rp->status)
632 goto a2mp_rsp;
633
634 hdev->amp_status = rp->amp_status;
635 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
636 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
637 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
638 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
639 hdev->amp_type = rp->amp_type;
640 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
641 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
642 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
643 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
644
645 a2mp_rsp:
646 a2mp_send_getinfo_rsp(hdev);
647 }
648
649 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
650 struct sk_buff *skb)
651 {
652 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
653 struct amp_assoc *assoc = &hdev->loc_assoc;
654 size_t rem_len, frag_len;
655
656 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
657
658 if (rp->status)
659 goto a2mp_rsp;
660
661 frag_len = skb->len - sizeof(*rp);
662 rem_len = __le16_to_cpu(rp->rem_len);
663
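	/* More assoc data remains than this event carries: stash the
	 * fragment and request the next one. */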
664 if (rem_len > frag_len) {
665 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
666
667 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
668 assoc->offset += frag_len;
669
670 /* Read other fragments */
671 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
672
673 return;
674 }
675
676 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
677 assoc->len = assoc->offset + rem_len;
678 assoc->offset = 0;
679
680 a2mp_rsp:
681 /* Send A2MP Rsp when all fragments are received */
682 a2mp_send_getampassoc_rsp(hdev, rp->status);
683 a2mp_send_create_phy_link_req(hdev, rp->status);
684 }
685
686 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
687 struct sk_buff *skb)
688 {
689 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
690
691 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
692
693 if (!rp->status)
694 hdev->inq_tx_power = rp->tx_power;
695 }
696
697 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
698 {
699 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
700 struct hci_cp_pin_code_reply *cp;
701 struct hci_conn *conn;
702
703 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
704
705 hci_dev_lock(hdev);
706
707 if (test_bit(HCI_MGMT, &hdev->dev_flags))
708 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
709
710 if (rp->status)
711 goto unlock;
712
713 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
714 if (!cp)
715 goto unlock;
716
717 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
718 if (conn)
719 conn->pin_length = cp->pin_len;
720
721 unlock:
722 hci_dev_unlock(hdev);
723 }
724
725 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
726 {
727 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
728
729 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
730
731 hci_dev_lock(hdev);
732
733 if (test_bit(HCI_MGMT, &hdev->dev_flags))
734 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
735 rp->status);
736
737 hci_dev_unlock(hdev);
738 }
739
740 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
741 struct sk_buff *skb)
742 {
743 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
744
745 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
746
747 if (rp->status)
748 return;
749
750 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
751 hdev->le_pkts = rp->le_max_pkt;
752
753 hdev->le_cnt = hdev->le_pkts;
754
755 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
756 }
757
758 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
759 struct sk_buff *skb)
760 {
761 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
762
763 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
764
765 if (!rp->status)
766 memcpy(hdev->le_features, rp->features, 8);
767 }
768
769 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
770 struct sk_buff *skb)
771 {
772 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
773
774 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
775
776 if (!rp->status)
777 hdev->adv_tx_power = rp->tx_power;
778 }
779
780 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
781 {
782 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
783
784 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
785
786 hci_dev_lock(hdev);
787
788 if (test_bit(HCI_MGMT, &hdev->dev_flags))
789 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
790 rp->status);
791
792 hci_dev_unlock(hdev);
793 }
794
795 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
796 struct sk_buff *skb)
797 {
798 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
799
800 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
801
802 hci_dev_lock(hdev);
803
804 if (test_bit(HCI_MGMT, &hdev->dev_flags))
805 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
806 ACL_LINK, 0, rp->status);
807
808 hci_dev_unlock(hdev);
809 }
810
811 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
812 {
813 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
814
815 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
816
817 hci_dev_lock(hdev);
818
819 if (test_bit(HCI_MGMT, &hdev->dev_flags))
820 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
821 0, rp->status);
822
823 hci_dev_unlock(hdev);
824 }
825
826 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
827 struct sk_buff *skb)
828 {
829 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
830
831 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832
833 hci_dev_lock(hdev);
834
835 if (test_bit(HCI_MGMT, &hdev->dev_flags))
836 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
837 ACL_LINK, 0, rp->status);
838
839 hci_dev_unlock(hdev);
840 }
841
842 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
843 struct sk_buff *skb)
844 {
845 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
846
847 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
848
849 hci_dev_lock(hdev);
850 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
851 rp->randomizer, rp->status);
852 hci_dev_unlock(hdev);
853 }
854
855 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
856 {
857 __u8 *sent, status = *((__u8 *) skb->data);
858
859 BT_DBG("%s status 0x%2.2x", hdev->name, status);
860
861 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
862 if (!sent)
863 return;
864
865 hci_dev_lock(hdev);
866
867 if (!status) {
868 if (*sent)
869 set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
870 else
871 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
872 }
873
874 if (!test_bit(HCI_INIT, &hdev->flags)) {
875 struct hci_request req;
876
877 hci_req_init(&req, hdev);
878 hci_update_ad(&req);
879 hci_req_run(&req, NULL);
880 }
881
882 hci_dev_unlock(hdev);
883 }
884
885 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
886 {
887 __u8 status = *((__u8 *) skb->data);
888
889 BT_DBG("%s status 0x%2.2x", hdev->name, status);
890
891 if (status) {
892 hci_dev_lock(hdev);
893 mgmt_start_discovery_failed(hdev, status);
894 hci_dev_unlock(hdev);
895 return;
896 }
897 }
898
899 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
900 struct sk_buff *skb)
901 {
902 struct hci_cp_le_set_scan_enable *cp;
903 __u8 status = *((__u8 *) skb->data);
904
905 BT_DBG("%s status 0x%2.2x", hdev->name, status);
906
907 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
908 if (!cp)
909 return;
910
911 switch (cp->enable) {
912 case LE_SCANNING_ENABLED:
913 if (status) {
914 hci_dev_lock(hdev);
915 mgmt_start_discovery_failed(hdev, status);
916 hci_dev_unlock(hdev);
917 return;
918 }
919
920 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
921
922 hci_dev_lock(hdev);
923 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
924 hci_dev_unlock(hdev);
925 break;
926
927 case LE_SCANNING_DISABLED:
928 if (status) {
929 hci_dev_lock(hdev);
930 mgmt_stop_discovery_failed(hdev, status);
931 hci_dev_unlock(hdev);
932 return;
933 }
934
935 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
936
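		/* LE phase of an interleaved discovery finished: hand over
		 * to the BR/EDR inquiry phase. */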
937 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
938 hdev->discovery.state == DISCOVERY_FINDING) {
939 mgmt_interleaved_discovery(hdev);
940 } else {
941 hci_dev_lock(hdev);
942 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
943 hci_dev_unlock(hdev);
944 }
945
946 break;
947
948 default:
949 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
950 break;
951 }
952 }
953
954 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
955 struct sk_buff *skb)
956 {
957 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
958
959 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
960
961 if (!rp->status)
962 hdev->le_white_list_size = rp->size;
963 }
964
965 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
966 struct sk_buff *skb)
967 {
968 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
969
970 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
971
972 if (!rp->status)
973 memcpy(hdev->le_states, rp->le_states, 8);
974 }
975
976 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
977 struct sk_buff *skb)
978 {
979 struct hci_cp_write_le_host_supported *sent;
980 __u8 status = *((__u8 *) skb->data);
981
982 BT_DBG("%s status 0x%2.2x", hdev->name, status);
983
984 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
985 if (!sent)
986 return;
987
988 if (!status) {
989 if (sent->le)
990 hdev->host_features[0] |= LMP_HOST_LE;
991 else
992 hdev->host_features[0] &= ~LMP_HOST_LE;
993
994 if (sent->simul)
995 hdev->host_features[0] |= LMP_HOST_LE_BREDR;
996 else
997 hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
998 }
999
1000 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1001 !test_bit(HCI_INIT, &hdev->flags))
1002 mgmt_le_enable_complete(hdev, sent->le, status);
1003 }
1004
1005 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1006 struct sk_buff *skb)
1007 {
1008 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1009
1010 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1011 hdev->name, rp->status, rp->phy_handle);
1012
1013 if (rp->status)
1014 return;
1015
1016 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1017 }
1018
1019 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1020 {
1021 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1022
1023 if (status) {
1024 hci_conn_check_pending(hdev);
1025 hci_dev_lock(hdev);
1026 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1027 mgmt_start_discovery_failed(hdev, status);
1028 hci_dev_unlock(hdev);
1029 return;
1030 }
1031
1032 set_bit(HCI_INQUIRY, &hdev->flags);
1033
1034 hci_dev_lock(hdev);
1035 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1036 hci_dev_unlock(hdev);
1037 }
1038
1039 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1040 {
1041 struct hci_cp_create_conn *cp;
1042 struct hci_conn *conn;
1043
1044 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1045
1046 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1047 if (!cp)
1048 return;
1049
1050 hci_dev_lock(hdev);
1051
1052 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1053
1054 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1055
1056 if (status) {
1057 if (conn && conn->state == BT_CONNECT) {
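			/* 0x0c (Command Disallowed) is usually transient
			 * (e.g. a page or inquiry already in progress): keep
			 * the connection pending for a retry unless too many
			 * attempts have been made. */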
1058 if (status != 0x0c || conn->attempt > 2) {
1059 conn->state = BT_CLOSED;
1060 hci_proto_connect_cfm(conn, status);
1061 hci_conn_del(conn);
1062 } else
1063 conn->state = BT_CONNECT2;
1064 }
1065 } else {
1066 if (!conn) {
1067 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1068 if (conn) {
1069 conn->out = true;
1070 conn->link_mode |= HCI_LM_MASTER;
1071 } else
1072 BT_ERR("No memory for new connection");
1073 }
1074 }
1075
1076 hci_dev_unlock(hdev);
1077 }
1078
1079 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1080 {
1081 struct hci_cp_add_sco *cp;
1082 struct hci_conn *acl, *sco;
1083 __u16 handle;
1084
1085 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1086
1087 if (!status)
1088 return;
1089
1090 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1091 if (!cp)
1092 return;
1093
1094 handle = __le16_to_cpu(cp->handle);
1095
1096 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1097
1098 hci_dev_lock(hdev);
1099
1100 acl = hci_conn_hash_lookup_handle(hdev, handle);
1101 if (acl) {
1102 sco = acl->link;
1103 if (sco) {
1104 sco->state = BT_CLOSED;
1105
1106 hci_proto_connect_cfm(sco, status);
1107 hci_conn_del(sco);
1108 }
1109 }
1110
1111 hci_dev_unlock(hdev);
1112 }
1113
1114 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1115 {
1116 struct hci_cp_auth_requested *cp;
1117 struct hci_conn *conn;
1118
1119 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1120
1121 if (!status)
1122 return;
1123
1124 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1125 if (!cp)
1126 return;
1127
1128 hci_dev_lock(hdev);
1129
1130 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1131 if (conn) {
1132 if (conn->state == BT_CONFIG) {
1133 hci_proto_connect_cfm(conn, status);
1134 hci_conn_put(conn);
1135 }
1136 }
1137
1138 hci_dev_unlock(hdev);
1139 }
1140
1141 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1142 {
1143 struct hci_cp_set_conn_encrypt *cp;
1144 struct hci_conn *conn;
1145
1146 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1147
1148 if (!status)
1149 return;
1150
1151 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1152 if (!cp)
1153 return;
1154
1155 hci_dev_lock(hdev);
1156
1157 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1158 if (conn) {
1159 if (conn->state == BT_CONFIG) {
1160 hci_proto_connect_cfm(conn, status);
1161 hci_conn_put(conn);
1162 }
1163 }
1164
1165 hci_dev_unlock(hdev);
1166 }
1167
1168 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1169 struct hci_conn *conn)
1170 {
1171 if (conn->state != BT_CONFIG || !conn->out)
1172 return 0;
1173
1174 if (conn->pending_sec_level == BT_SECURITY_SDP)
1175 return 0;
1176
1177 /* Only request authentication for SSP connections or non-SSP
1178 * devices with sec_level HIGH or if MITM protection is requested */
1179 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1180 conn->pending_sec_level != BT_SECURITY_HIGH)
1181 return 0;
1182
1183 return 1;
1184 }
1185
1186 static int hci_resolve_name(struct hci_dev *hdev,
1187 struct inquiry_entry *e)
1188 {
1189 struct hci_cp_remote_name_req cp;
1190
1191 memset(&cp, 0, sizeof(cp));
1192
1193 bacpy(&cp.bdaddr, &e->data.bdaddr);
1194 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1195 cp.pscan_mode = e->data.pscan_mode;
1196 cp.clock_offset = e->data.clock_offset;
1197
1198 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1199 }
1200
1201 static bool hci_resolve_next_name(struct hci_dev *hdev)
1202 {
1203 struct discovery_state *discov = &hdev->discovery;
1204 struct inquiry_entry *e;
1205
1206 if (list_empty(&discov->resolve))
1207 return false;
1208
1209 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1210 if (!e)
1211 return false;
1212
1213 if (hci_resolve_name(hdev, e) == 0) {
1214 e->name_state = NAME_PENDING;
1215 return true;
1216 }
1217
1218 return false;
1219 }
1220
1221 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1222 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1223 {
1224 struct discovery_state *discov = &hdev->discovery;
1225 struct inquiry_entry *e;
1226
1227 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1228 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1229 name_len, conn->dev_class);
1230
1231 if (discov->state == DISCOVERY_STOPPED)
1232 return;
1233
1234 if (discov->state == DISCOVERY_STOPPING)
1235 goto discov_complete;
1236
1237 if (discov->state != DISCOVERY_RESOLVING)
1238 return;
1239
1240 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1241 	/* If the device was not found in the list of devices whose names are
1242 	 * pending, there is no need to continue resolving the next name, as
1243 	 * that will be done upon receiving another Remote Name Request
1244 	 * Complete event */
1245 if (!e)
1246 return;
1247
1248 list_del(&e->list);
1249 if (name) {
1250 e->name_state = NAME_KNOWN;
1251 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1252 e->data.rssi, name, name_len);
1253 } else {
1254 e->name_state = NAME_NOT_KNOWN;
1255 }
1256
1257 if (hci_resolve_next_name(hdev))
1258 return;
1259
1260 discov_complete:
1261 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1262 }
1263
1264 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1265 {
1266 struct hci_cp_remote_name_req *cp;
1267 struct hci_conn *conn;
1268
1269 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1270
1271 	/* If successful, wait for the name req complete event before
1272 	 * checking for the need to do authentication */
1273 if (!status)
1274 return;
1275
1276 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1277 if (!cp)
1278 return;
1279
1280 hci_dev_lock(hdev);
1281
1282 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1283
1284 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1285 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1286
1287 if (!conn)
1288 goto unlock;
1289
1290 if (!hci_outgoing_auth_needed(hdev, conn))
1291 goto unlock;
1292
1293 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1294 struct hci_cp_auth_requested cp;
1295 cp.handle = __cpu_to_le16(conn->handle);
1296 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1297 }
1298
1299 unlock:
1300 hci_dev_unlock(hdev);
1301 }
1302
1303 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1304 {
1305 struct hci_cp_read_remote_features *cp;
1306 struct hci_conn *conn;
1307
1308 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1309
1310 if (!status)
1311 return;
1312
1313 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1314 if (!cp)
1315 return;
1316
1317 hci_dev_lock(hdev);
1318
1319 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1320 if (conn) {
1321 if (conn->state == BT_CONFIG) {
1322 hci_proto_connect_cfm(conn, status);
1323 hci_conn_put(conn);
1324 }
1325 }
1326
1327 hci_dev_unlock(hdev);
1328 }
1329
1330 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1331 {
1332 struct hci_cp_read_remote_ext_features *cp;
1333 struct hci_conn *conn;
1334
1335 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1336
1337 if (!status)
1338 return;
1339
1340 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1341 if (!cp)
1342 return;
1343
1344 hci_dev_lock(hdev);
1345
1346 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1347 if (conn) {
1348 if (conn->state == BT_CONFIG) {
1349 hci_proto_connect_cfm(conn, status);
1350 hci_conn_put(conn);
1351 }
1352 }
1353
1354 hci_dev_unlock(hdev);
1355 }
1356
1357 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1358 {
1359 struct hci_cp_setup_sync_conn *cp;
1360 struct hci_conn *acl, *sco;
1361 __u16 handle;
1362
1363 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364
1365 if (!status)
1366 return;
1367
1368 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1369 if (!cp)
1370 return;
1371
1372 handle = __le16_to_cpu(cp->handle);
1373
1374 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1375
1376 hci_dev_lock(hdev);
1377
1378 acl = hci_conn_hash_lookup_handle(hdev, handle);
1379 if (acl) {
1380 sco = acl->link;
1381 if (sco) {
1382 sco->state = BT_CLOSED;
1383
1384 hci_proto_connect_cfm(sco, status);
1385 hci_conn_del(sco);
1386 }
1387 }
1388
1389 hci_dev_unlock(hdev);
1390 }
1391
1392 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1393 {
1394 struct hci_cp_sniff_mode *cp;
1395 struct hci_conn *conn;
1396
1397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1398
1399 if (!status)
1400 return;
1401
1402 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1403 if (!cp)
1404 return;
1405
1406 hci_dev_lock(hdev);
1407
1408 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1409 if (conn) {
1410 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1411
1412 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1413 hci_sco_setup(conn, status);
1414 }
1415
1416 hci_dev_unlock(hdev);
1417 }
1418
1419 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1420 {
1421 struct hci_cp_exit_sniff_mode *cp;
1422 struct hci_conn *conn;
1423
1424 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1425
1426 if (!status)
1427 return;
1428
1429 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1430 if (!cp)
1431 return;
1432
1433 hci_dev_lock(hdev);
1434
1435 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1436 if (conn) {
1437 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1438
1439 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1440 hci_sco_setup(conn, status);
1441 }
1442
1443 hci_dev_unlock(hdev);
1444 }
1445
1446 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1447 {
1448 struct hci_cp_disconnect *cp;
1449 struct hci_conn *conn;
1450
1451 if (!status)
1452 return;
1453
1454 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1455 if (!cp)
1456 return;
1457
1458 hci_dev_lock(hdev);
1459
1460 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1461 if (conn)
1462 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1463 conn->dst_type, status);
1464
1465 hci_dev_unlock(hdev);
1466 }
1467
1468 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1469 {
1470 struct hci_conn *conn;
1471
1472 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1473
1474 if (status) {
1475 hci_dev_lock(hdev);
1476
1477 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1478 if (!conn) {
1479 hci_dev_unlock(hdev);
1480 return;
1481 }
1482
1483 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1484
1485 conn->state = BT_CLOSED;
1486 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1487 conn->dst_type, status);
1488 hci_proto_connect_cfm(conn, status);
1489 hci_conn_del(conn);
1490
1491 hci_dev_unlock(hdev);
1492 }
1493 }
1494
1495 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1496 {
1497 struct hci_cp_create_phy_link *cp;
1498
1499 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1500
1501 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1502 if (!cp)
1503 return;
1504
1505 hci_dev_lock(hdev);
1506
1507 if (status) {
1508 struct hci_conn *hcon;
1509
1510 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1511 if (hcon)
1512 hci_conn_del(hcon);
1513 } else {
1514 amp_write_remote_assoc(hdev, cp->phy_handle);
1515 }
1516
1517 hci_dev_unlock(hdev);
1518 }
1519
1520 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1521 {
1522 struct hci_cp_accept_phy_link *cp;
1523
1524 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1525
1526 if (status)
1527 return;
1528
1529 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1530 if (!cp)
1531 return;
1532
1533 amp_write_remote_assoc(hdev, cp->phy_handle);
1534 }
1535
1536 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1537 {
1538 __u8 status = *((__u8 *) skb->data);
1539 struct discovery_state *discov = &hdev->discovery;
1540 struct inquiry_entry *e;
1541
1542 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1543
1544 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
1545
1546 hci_conn_check_pending(hdev);
1547
1548 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1549 return;
1550
1551 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1552 return;
1553
1554 hci_dev_lock(hdev);
1555
1556 if (discov->state != DISCOVERY_FINDING)
1557 goto unlock;
1558
1559 if (list_empty(&discov->resolve)) {
1560 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1561 goto unlock;
1562 }
1563
1564 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1565 if (e && hci_resolve_name(hdev, e) == 0) {
1566 e->name_state = NAME_PENDING;
1567 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1568 } else {
1569 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1570 }
1571
1572 unlock:
1573 hci_dev_unlock(hdev);
1574 }
1575
1576 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1577 {
1578 struct inquiry_data data;
1579 struct inquiry_info *info = (void *) (skb->data + 1);
1580 int num_rsp = *((__u8 *) skb->data);
1581
1582 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1583
1584 if (!num_rsp)
1585 return;
1586
1587 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1588 return;
1589
1590 hci_dev_lock(hdev);
1591
1592 for (; num_rsp; num_rsp--, info++) {
1593 bool name_known, ssp;
1594
1595 bacpy(&data.bdaddr, &info->bdaddr);
1596 data.pscan_rep_mode = info->pscan_rep_mode;
1597 data.pscan_period_mode = info->pscan_period_mode;
1598 data.pscan_mode = info->pscan_mode;
1599 memcpy(data.dev_class, info->dev_class, 3);
1600 data.clock_offset = info->clock_offset;
1601 data.rssi = 0x00;
1602 data.ssp_mode = 0x00;
1603
1604 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1605 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1606 info->dev_class, 0, !name_known, ssp, NULL,
1607 0);
1608 }
1609
1610 hci_dev_unlock(hdev);
1611 }
1612
1613 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1614 {
1615 struct hci_ev_conn_complete *ev = (void *) skb->data;
1616 struct hci_conn *conn;
1617
1618 BT_DBG("%s", hdev->name);
1619
1620 hci_dev_lock(hdev);
1621
1622 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1623 if (!conn) {
1624 if (ev->link_type != SCO_LINK)
1625 goto unlock;
1626
1627 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1628 if (!conn)
1629 goto unlock;
1630
1631 conn->type = SCO_LINK;
1632 }
1633
1634 if (!ev->status) {
1635 conn->handle = __le16_to_cpu(ev->handle);
1636
1637 if (conn->type == ACL_LINK) {
1638 conn->state = BT_CONFIG;
1639 hci_conn_hold(conn);
1640
1641 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1642 !hci_find_link_key(hdev, &ev->bdaddr))
1643 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1644 else
1645 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1646 } else
1647 conn->state = BT_CONNECTED;
1648
1649 hci_conn_hold_device(conn);
1650 hci_conn_add_sysfs(conn);
1651
1652 if (test_bit(HCI_AUTH, &hdev->flags))
1653 conn->link_mode |= HCI_LM_AUTH;
1654
1655 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1656 conn->link_mode |= HCI_LM_ENCRYPT;
1657
1658 /* Get remote features */
1659 if (conn->type == ACL_LINK) {
1660 struct hci_cp_read_remote_features cp;
1661 cp.handle = ev->handle;
1662 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1663 sizeof(cp), &cp);
1664 }
1665
1666 /* Set packet type for incoming connection */
1667 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1668 struct hci_cp_change_conn_ptype cp;
1669 cp.handle = ev->handle;
1670 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1671 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1672 &cp);
1673 }
1674 } else {
1675 conn->state = BT_CLOSED;
1676 if (conn->type == ACL_LINK)
1677 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1678 conn->dst_type, ev->status);
1679 }
1680
1681 if (conn->type == ACL_LINK)
1682 hci_sco_setup(conn, ev->status);
1683
1684 if (ev->status) {
1685 hci_proto_connect_cfm(conn, ev->status);
1686 hci_conn_del(conn);
1687 } else if (ev->link_type != ACL_LINK)
1688 hci_proto_connect_cfm(conn, ev->status);
1689
1690 unlock:
1691 hci_dev_unlock(hdev);
1692
1693 hci_conn_check_pending(hdev);
1694 }
1695
1696 void hci_conn_accept(struct hci_conn *conn, int mask)
1697 {
1698 struct hci_dev *hdev = conn->hdev;
1699
1700 BT_DBG("conn %p", conn);
1701
1702 conn->state = BT_CONFIG;
1703
1704 if (!lmp_esco_capable(hdev)) {
1705 struct hci_cp_accept_conn_req cp;
1706
1707 bacpy(&cp.bdaddr, &conn->dst);
1708
1709 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1710 cp.role = 0x00; /* Become master */
1711 else
1712 cp.role = 0x01; /* Remain slave */
1713
1714 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
1715 	} else /* lmp_esco_capable(hdev) */ {
1716 struct hci_cp_accept_sync_conn_req cp;
1717
1718 bacpy(&cp.bdaddr, &conn->dst);
1719 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1720
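		/* 0x1f40 = 8000 bytes/s (64 kbit/s) in each direction;
		 * 0xffff max latency and 0xff retransmission effort mean
		 * "don't care". */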
1721 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1722 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1723 cp.max_latency = __constant_cpu_to_le16(0xffff);
1724 cp.content_format = cpu_to_le16(hdev->voice_setting);
1725 cp.retrans_effort = 0xff;
1726
1727 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1728 sizeof(cp), &cp);
1729 }
1730 }
1731
1732 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1733 {
1734 struct hci_ev_conn_request *ev = (void *) skb->data;
1735 int mask = hdev->link_mode;
1736 __u8 flags = 0;
1737
1738 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1739 ev->link_type);
1740
1741 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1742 &flags);
1743
1744 if ((mask & HCI_LM_ACCEPT) &&
1745 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1746 /* Connection accepted */
1747 struct inquiry_entry *ie;
1748 struct hci_conn *conn;
1749
1750 hci_dev_lock(hdev);
1751
1752 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1753 if (ie)
1754 memcpy(ie->data.dev_class, ev->dev_class, 3);
1755
1756 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1757 &ev->bdaddr);
1758 if (!conn) {
1759 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1760 if (!conn) {
1761 BT_ERR("No memory for new connection");
1762 hci_dev_unlock(hdev);
1763 return;
1764 }
1765 }
1766
1767 memcpy(conn->dev_class, ev->dev_class, 3);
1768
1769 hci_dev_unlock(hdev);
1770
1771 if (ev->link_type == ACL_LINK ||
1772 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1773 struct hci_cp_accept_conn_req cp;
1774 conn->state = BT_CONNECT;
1775
1776 bacpy(&cp.bdaddr, &ev->bdaddr);
1777
1778 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1779 cp.role = 0x00; /* Become master */
1780 else
1781 cp.role = 0x01; /* Remain slave */
1782
1783 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1784 &cp);
1785 } else if (!(flags & HCI_PROTO_DEFER)) {
1786 struct hci_cp_accept_sync_conn_req cp;
1787 conn->state = BT_CONNECT;
1788
1789 bacpy(&cp.bdaddr, &ev->bdaddr);
1790 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1791
1792 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1793 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1794 cp.max_latency = __constant_cpu_to_le16(0xffff);
1795 cp.content_format = cpu_to_le16(hdev->voice_setting);
1796 cp.retrans_effort = 0xff;
1797
1798 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1799 sizeof(cp), &cp);
1800 } else {
1801 conn->state = BT_CONNECT2;
1802 hci_proto_connect_cfm(conn, 0);
1803 hci_conn_put(conn);
1804 }
1805 } else {
1806 /* Connection rejected */
1807 struct hci_cp_reject_conn_req cp;
1808
1809 bacpy(&cp.bdaddr, &ev->bdaddr);
1810 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1811 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1812 }
1813 }
1814
1815 static u8 hci_to_mgmt_reason(u8 err)
1816 {
1817 switch (err) {
1818 case HCI_ERROR_CONNECTION_TIMEOUT:
1819 return MGMT_DEV_DISCONN_TIMEOUT;
1820 case HCI_ERROR_REMOTE_USER_TERM:
1821 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1822 case HCI_ERROR_REMOTE_POWER_OFF:
1823 return MGMT_DEV_DISCONN_REMOTE;
1824 case HCI_ERROR_LOCAL_HOST_TERM:
1825 return MGMT_DEV_DISCONN_LOCAL_HOST;
1826 default:
1827 return MGMT_DEV_DISCONN_UNKNOWN;
1828 }
1829 }
1830
1831 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1832 {
1833 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1834 struct hci_conn *conn;
1835
1836 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1837
1838 hci_dev_lock(hdev);
1839
1840 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1841 if (!conn)
1842 goto unlock;
1843
1844 if (ev->status == 0)
1845 conn->state = BT_CLOSED;
1846
1847 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1848 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1849 if (ev->status) {
1850 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1851 conn->dst_type, ev->status);
1852 } else {
1853 u8 reason = hci_to_mgmt_reason(ev->reason);
1854
1855 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1856 conn->dst_type, reason);
1857 }
1858 }
1859
1860 if (ev->status == 0) {
1861 if (conn->type == ACL_LINK && conn->flush_key)
1862 hci_remove_link_key(hdev, &conn->dst);
1863 hci_proto_disconn_cfm(conn, ev->reason);
1864 hci_conn_del(conn);
1865 }
1866
1867 unlock:
1868 hci_dev_unlock(hdev);
1869 }
1870
1871 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1872 {
1873 struct hci_ev_auth_complete *ev = (void *) skb->data;
1874 struct hci_conn *conn;
1875
1876 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1877
1878 hci_dev_lock(hdev);
1879
1880 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1881 if (!conn)
1882 goto unlock;
1883
1884 if (!ev->status) {
1885 if (!hci_conn_ssp_enabled(conn) &&
1886 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1887 BT_INFO("re-auth of legacy device is not possible.");
1888 } else {
1889 conn->link_mode |= HCI_LM_AUTH;
1890 conn->sec_level = conn->pending_sec_level;
1891 }
1892 } else {
1893 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1894 ev->status);
1895 }
1896
1897 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1898 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1899
1900 if (conn->state == BT_CONFIG) {
1901 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1902 struct hci_cp_set_conn_encrypt cp;
1903 cp.handle = ev->handle;
1904 cp.encrypt = 0x01;
1905 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1906 &cp);
1907 } else {
1908 conn->state = BT_CONNECTED;
1909 hci_proto_connect_cfm(conn, ev->status);
1910 hci_conn_put(conn);
1911 }
1912 } else {
1913 hci_auth_cfm(conn, ev->status);
1914
1915 hci_conn_hold(conn);
1916 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1917 hci_conn_put(conn);
1918 }
1919
1920 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1921 if (!ev->status) {
1922 struct hci_cp_set_conn_encrypt cp;
1923 cp.handle = ev->handle;
1924 cp.encrypt = 0x01;
1925 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1926 &cp);
1927 } else {
1928 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1929 hci_encrypt_cfm(conn, ev->status, 0x00);
1930 }
1931 }
1932
1933 unlock:
1934 hci_dev_unlock(hdev);
1935 }
1936
1937 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1938 {
1939 struct hci_ev_remote_name *ev = (void *) skb->data;
1940 struct hci_conn *conn;
1941
1942 BT_DBG("%s", hdev->name);
1943
1944 hci_conn_check_pending(hdev);
1945
1946 hci_dev_lock(hdev);
1947
1948 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1949
1950 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1951 goto check_auth;
1952
1953 if (ev->status == 0)
1954 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1955 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1956 else
1957 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1958
1959 check_auth:
1960 if (!conn)
1961 goto unlock;
1962
1963 if (!hci_outgoing_auth_needed(hdev, conn))
1964 goto unlock;
1965
1966 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1967 struct hci_cp_auth_requested cp;
1968 cp.handle = __cpu_to_le16(conn->handle);
1969 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1970 }
1971
1972 unlock:
1973 hci_dev_unlock(hdev);
1974 }
1975
1976 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1977 {
1978 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1979 struct hci_conn *conn;
1980
1981 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1982
1983 hci_dev_lock(hdev);
1984
1985 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1986 if (conn) {
1987 if (!ev->status) {
1988 if (ev->encrypt) {
1989 /* Encryption implies authentication */
1990 conn->link_mode |= HCI_LM_AUTH;
1991 conn->link_mode |= HCI_LM_ENCRYPT;
1992 conn->sec_level = conn->pending_sec_level;
1993 } else
1994 conn->link_mode &= ~HCI_LM_ENCRYPT;
1995 }
1996
1997 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1998
1999 if (ev->status && conn->state == BT_CONNECTED) {
2000 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2001 hci_conn_put(conn);
2002 goto unlock;
2003 }
2004
2005 if (conn->state == BT_CONFIG) {
2006 if (!ev->status)
2007 conn->state = BT_CONNECTED;
2008
2009 hci_proto_connect_cfm(conn, ev->status);
2010 hci_conn_put(conn);
2011 } else
2012 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2013 }
2014
2015 unlock:
2016 hci_dev_unlock(hdev);
2017 }
2018
2019 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2020 struct sk_buff *skb)
2021 {
2022 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2023 struct hci_conn *conn;
2024
2025 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2026
2027 hci_dev_lock(hdev);
2028
2029 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2030 if (conn) {
2031 if (!ev->status)
2032 conn->link_mode |= HCI_LM_SECURE;
2033
2034 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2035
2036 hci_key_change_cfm(conn, ev->status);
2037 }
2038
2039 hci_dev_unlock(hdev);
2040 }
2041
2042 static void hci_remote_features_evt(struct hci_dev *hdev,
2043 struct sk_buff *skb)
2044 {
2045 struct hci_ev_remote_features *ev = (void *) skb->data;
2046 struct hci_conn *conn;
2047
2048 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2049
2050 hci_dev_lock(hdev);
2051
2052 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2053 if (!conn)
2054 goto unlock;
2055
2056 if (!ev->status)
2057 memcpy(conn->features, ev->features, 8);
2058
2059 if (conn->state != BT_CONFIG)
2060 goto unlock;
2061
2062 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2063 struct hci_cp_read_remote_ext_features cp;
2064 cp.handle = ev->handle;
2065 cp.page = 0x01;
2066 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2067 sizeof(cp), &cp);
2068 goto unlock;
2069 }
2070
2071 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2072 struct hci_cp_remote_name_req cp;
2073 memset(&cp, 0, sizeof(cp));
2074 bacpy(&cp.bdaddr, &conn->dst);
2075 cp.pscan_rep_mode = 0x02;
2076 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2077 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2078 mgmt_device_connected(hdev, &conn->dst, conn->type,
2079 conn->dst_type, 0, NULL, 0,
2080 conn->dev_class);
2081
2082 if (!hci_outgoing_auth_needed(hdev, conn)) {
2083 conn->state = BT_CONNECTED;
2084 hci_proto_connect_cfm(conn, ev->status);
2085 hci_conn_put(conn);
2086 }
2087
2088 unlock:
2089 hci_dev_unlock(hdev);
2090 }
2091
2092 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2093 {
2094 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2095 u8 status = skb->data[sizeof(*ev)];
2096 __u16 opcode;
2097
2098 skb_pull(skb, sizeof(*ev));
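	/* The Command Complete header has been pulled, so each handler below
	 * sees only the command's return parameters at skb->data. */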
2099
2100 opcode = __le16_to_cpu(ev->opcode);
2101
2102 switch (opcode) {
2103 case HCI_OP_INQUIRY_CANCEL:
2104 hci_cc_inquiry_cancel(hdev, skb);
2105 break;
2106
2107 case HCI_OP_PERIODIC_INQ:
2108 hci_cc_periodic_inq(hdev, skb);
2109 break;
2110
2111 case HCI_OP_EXIT_PERIODIC_INQ:
2112 hci_cc_exit_periodic_inq(hdev, skb);
2113 break;
2114
2115 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2116 hci_cc_remote_name_req_cancel(hdev, skb);
2117 break;
2118
2119 case HCI_OP_ROLE_DISCOVERY:
2120 hci_cc_role_discovery(hdev, skb);
2121 break;
2122
2123 case HCI_OP_READ_LINK_POLICY:
2124 hci_cc_read_link_policy(hdev, skb);
2125 break;
2126
2127 case HCI_OP_WRITE_LINK_POLICY:
2128 hci_cc_write_link_policy(hdev, skb);
2129 break;
2130
2131 case HCI_OP_READ_DEF_LINK_POLICY:
2132 hci_cc_read_def_link_policy(hdev, skb);
2133 break;
2134
2135 case HCI_OP_WRITE_DEF_LINK_POLICY:
2136 hci_cc_write_def_link_policy(hdev, skb);
2137 break;
2138
2139 case HCI_OP_RESET:
2140 hci_cc_reset(hdev, skb);
2141 break;
2142
2143 case HCI_OP_WRITE_LOCAL_NAME:
2144 hci_cc_write_local_name(hdev, skb);
2145 break;
2146
2147 case HCI_OP_READ_LOCAL_NAME:
2148 hci_cc_read_local_name(hdev, skb);
2149 break;
2150
2151 case HCI_OP_WRITE_AUTH_ENABLE:
2152 hci_cc_write_auth_enable(hdev, skb);
2153 break;
2154
2155 case HCI_OP_WRITE_ENCRYPT_MODE:
2156 hci_cc_write_encrypt_mode(hdev, skb);
2157 break;
2158
2159 case HCI_OP_WRITE_SCAN_ENABLE:
2160 hci_cc_write_scan_enable(hdev, skb);
2161 break;
2162
2163 case HCI_OP_READ_CLASS_OF_DEV:
2164 hci_cc_read_class_of_dev(hdev, skb);
2165 break;
2166
2167 case HCI_OP_WRITE_CLASS_OF_DEV:
2168 hci_cc_write_class_of_dev(hdev, skb);
2169 break;
2170
2171 case HCI_OP_READ_VOICE_SETTING:
2172 hci_cc_read_voice_setting(hdev, skb);
2173 break;
2174
2175 case HCI_OP_WRITE_VOICE_SETTING:
2176 hci_cc_write_voice_setting(hdev, skb);
2177 break;
2178
2179 case HCI_OP_WRITE_SSP_MODE:
2180 hci_cc_write_ssp_mode(hdev, skb);
2181 break;
2182
2183 case HCI_OP_READ_LOCAL_VERSION:
2184 hci_cc_read_local_version(hdev, skb);
2185 break;
2186
2187 case HCI_OP_READ_LOCAL_COMMANDS:
2188 hci_cc_read_local_commands(hdev, skb);
2189 break;
2190
2191 case HCI_OP_READ_LOCAL_FEATURES:
2192 hci_cc_read_local_features(hdev, skb);
2193 break;
2194
2195 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2196 hci_cc_read_local_ext_features(hdev, skb);
2197 break;
2198
2199 case HCI_OP_READ_BUFFER_SIZE:
2200 hci_cc_read_buffer_size(hdev, skb);
2201 break;
2202
2203 case HCI_OP_READ_BD_ADDR:
2204 hci_cc_read_bd_addr(hdev, skb);
2205 break;
2206
2207 case HCI_OP_READ_DATA_BLOCK_SIZE:
2208 hci_cc_read_data_block_size(hdev, skb);
2209 break;
2210
2211 case HCI_OP_READ_FLOW_CONTROL_MODE:
2212 hci_cc_read_flow_control_mode(hdev, skb);
2213 break;
2214
2215 case HCI_OP_READ_LOCAL_AMP_INFO:
2216 hci_cc_read_local_amp_info(hdev, skb);
2217 break;
2218
2219 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2220 hci_cc_read_local_amp_assoc(hdev, skb);
2221 break;
2222
2223 case HCI_OP_READ_INQ_RSP_TX_POWER:
2224 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2225 break;
2226
2227 case HCI_OP_PIN_CODE_REPLY:
2228 hci_cc_pin_code_reply(hdev, skb);
2229 break;
2230
2231 case HCI_OP_PIN_CODE_NEG_REPLY:
2232 hci_cc_pin_code_neg_reply(hdev, skb);
2233 break;
2234
2235 case HCI_OP_READ_LOCAL_OOB_DATA:
2236 hci_cc_read_local_oob_data_reply(hdev, skb);
2237 break;
2238
2239 case HCI_OP_LE_READ_BUFFER_SIZE:
2240 hci_cc_le_read_buffer_size(hdev, skb);
2241 break;
2242
2243 case HCI_OP_LE_READ_LOCAL_FEATURES:
2244 hci_cc_le_read_local_features(hdev, skb);
2245 break;
2246
2247 case HCI_OP_LE_READ_ADV_TX_POWER:
2248 hci_cc_le_read_adv_tx_power(hdev, skb);
2249 break;
2250
2251 case HCI_OP_USER_CONFIRM_REPLY:
2252 hci_cc_user_confirm_reply(hdev, skb);
2253 break;
2254
2255 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2256 hci_cc_user_confirm_neg_reply(hdev, skb);
2257 break;
2258
2259 case HCI_OP_USER_PASSKEY_REPLY:
2260 hci_cc_user_passkey_reply(hdev, skb);
2261 break;
2262
2263 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2264 hci_cc_user_passkey_neg_reply(hdev, skb);
2265 break;
2266
2267 case HCI_OP_LE_SET_SCAN_PARAM:
2268 hci_cc_le_set_scan_param(hdev, skb);
2269 break;
2270
2271 case HCI_OP_LE_SET_ADV_ENABLE:
2272 hci_cc_le_set_adv_enable(hdev, skb);
2273 break;
2274
2275 case HCI_OP_LE_SET_SCAN_ENABLE:
2276 hci_cc_le_set_scan_enable(hdev, skb);
2277 break;
2278
2279 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2280 hci_cc_le_read_white_list_size(hdev, skb);
2281 break;
2282
2283 case HCI_OP_LE_READ_SUPPORTED_STATES:
2284 hci_cc_le_read_supported_states(hdev, skb);
2285 break;
2286
2287 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2288 hci_cc_write_le_host_supported(hdev, skb);
2289 break;
2290
2291 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2292 hci_cc_write_remote_amp_assoc(hdev, skb);
2293 break;
2294
2295 default:
2296 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2297 break;
2298 }
2299
2300 if (opcode != HCI_OP_NOP)
2301 del_timer(&hdev->cmd_timer);
2302
2303 hci_req_cmd_complete(hdev, opcode, status);
2304
2305 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2306 atomic_set(&hdev->cmd_cnt, 1);
2307 if (!skb_queue_empty(&hdev->cmd_q))
2308 queue_work(hdev->workqueue, &hdev->cmd_work);
2309 }
2310 }
2311
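/* Command Status event: dispatched to the matching hci_cs_* helper
 * based on the opcode.  As with Command Complete, a non-NOP status
 * cancels the command timer and a non-zero ncmd re-enables sending
 * of queued commands */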
2312 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2313 {
2314 struct hci_ev_cmd_status *ev = (void *) skb->data;
2315 __u16 opcode;
2316
2317 skb_pull(skb, sizeof(*ev));
2318
2319 opcode = __le16_to_cpu(ev->opcode);
2320
2321 switch (opcode) {
2322 case HCI_OP_INQUIRY:
2323 hci_cs_inquiry(hdev, ev->status);
2324 break;
2325
2326 case HCI_OP_CREATE_CONN:
2327 hci_cs_create_conn(hdev, ev->status);
2328 break;
2329
2330 case HCI_OP_ADD_SCO:
2331 hci_cs_add_sco(hdev, ev->status);
2332 break;
2333
2334 case HCI_OP_AUTH_REQUESTED:
2335 hci_cs_auth_requested(hdev, ev->status);
2336 break;
2337
2338 case HCI_OP_SET_CONN_ENCRYPT:
2339 hci_cs_set_conn_encrypt(hdev, ev->status);
2340 break;
2341
2342 case HCI_OP_REMOTE_NAME_REQ:
2343 hci_cs_remote_name_req(hdev, ev->status);
2344 break;
2345
2346 case HCI_OP_READ_REMOTE_FEATURES:
2347 hci_cs_read_remote_features(hdev, ev->status);
2348 break;
2349
2350 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2351 hci_cs_read_remote_ext_features(hdev, ev->status);
2352 break;
2353
2354 case HCI_OP_SETUP_SYNC_CONN:
2355 hci_cs_setup_sync_conn(hdev, ev->status);
2356 break;
2357
2358 case HCI_OP_SNIFF_MODE:
2359 hci_cs_sniff_mode(hdev, ev->status);
2360 break;
2361
2362 case HCI_OP_EXIT_SNIFF_MODE:
2363 hci_cs_exit_sniff_mode(hdev, ev->status);
2364 break;
2365
2366 case HCI_OP_DISCONNECT:
2367 hci_cs_disconnect(hdev, ev->status);
2368 break;
2369
2370 case HCI_OP_LE_CREATE_CONN:
2371 hci_cs_le_create_conn(hdev, ev->status);
2372 break;
2373
2374 case HCI_OP_CREATE_PHY_LINK:
2375 hci_cs_create_phylink(hdev, ev->status);
2376 break;
2377
2378 case HCI_OP_ACCEPT_PHY_LINK:
2379 hci_cs_accept_phylink(hdev, ev->status);
2380 break;
2381
2382 default:
2383 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2384 break;
2385 }
2386
2387 if (opcode != HCI_OP_NOP)
2388 del_timer(&hdev->cmd_timer);
2389
2390 hci_req_cmd_status(hdev, opcode, ev->status);
2391
2392 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2393 atomic_set(&hdev->cmd_cnt, 1);
2394 if (!skb_queue_empty(&hdev->cmd_q))
2395 queue_work(hdev->workqueue, &hdev->cmd_work);
2396 }
2397 }
2398
2399 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2400 {
2401 struct hci_ev_role_change *ev = (void *) skb->data;
2402 struct hci_conn *conn;
2403
2404 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2405
2406 hci_dev_lock(hdev);
2407
2408 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2409 if (conn) {
2410 if (!ev->status) {
2411 if (ev->role)
2412 conn->link_mode &= ~HCI_LM_MASTER;
2413 else
2414 conn->link_mode |= HCI_LM_MASTER;
2415 }
2416
2417 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2418
2419 hci_role_switch_cfm(conn, ev->status, ev->role);
2420 }
2421
2422 hci_dev_unlock(hdev);
2423 }
2424
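/* Number Of Completed Packets event (packet-based flow control):
 * each {handle, count} pair returns "count" transmit credits for the
 * matching connection.  The per-type counters are clamped to the
 * buffer sizes reported by the controller, e.g. with acl_pkts == 8
 * and three completed ACL packets, acl_cnt grows by 3 but never
 * beyond 8.  LE traffic falls back to the ACL counters when the
 * controller reports no dedicated LE buffers (le_pkts == 0) */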
2425 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2426 {
2427 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2428 int i;
2429
2430 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2431 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2432 return;
2433 }
2434
2435 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2436 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2437 BT_DBG("%s bad parameters", hdev->name);
2438 return;
2439 }
2440
2441 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2442
2443 for (i = 0; i < ev->num_hndl; i++) {
2444 struct hci_comp_pkts_info *info = &ev->handles[i];
2445 struct hci_conn *conn;
2446 __u16 handle, count;
2447
2448 handle = __le16_to_cpu(info->handle);
2449 count = __le16_to_cpu(info->count);
2450
2451 conn = hci_conn_hash_lookup_handle(hdev, handle);
2452 if (!conn)
2453 continue;
2454
2455 conn->sent -= count;
2456
2457 switch (conn->type) {
2458 case ACL_LINK:
2459 hdev->acl_cnt += count;
2460 if (hdev->acl_cnt > hdev->acl_pkts)
2461 hdev->acl_cnt = hdev->acl_pkts;
2462 break;
2463
2464 case LE_LINK:
2465 if (hdev->le_pkts) {
2466 hdev->le_cnt += count;
2467 if (hdev->le_cnt > hdev->le_pkts)
2468 hdev->le_cnt = hdev->le_pkts;
2469 } else {
2470 hdev->acl_cnt += count;
2471 if (hdev->acl_cnt > hdev->acl_pkts)
2472 hdev->acl_cnt = hdev->acl_pkts;
2473 }
2474 break;
2475
2476 case SCO_LINK:
2477 hdev->sco_cnt += count;
2478 if (hdev->sco_cnt > hdev->sco_pkts)
2479 hdev->sco_cnt = hdev->sco_pkts;
2480 break;
2481
2482 default:
2483 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2484 break;
2485 }
2486 }
2487
2488 queue_work(hdev->workqueue, &hdev->tx_work);
2489 }
2490
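/* For BR/EDR controllers a completed-blocks handle identifies a
 * connection directly; on AMP controllers it identifies a logical
 * channel, so look up the hci_chan and return its owning connection */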
2491 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2492 __u16 handle)
2493 {
2494 struct hci_chan *chan;
2495
2496 switch (hdev->dev_type) {
2497 case HCI_BREDR:
2498 return hci_conn_hash_lookup_handle(hdev, handle);
2499 case HCI_AMP:
2500 chan = hci_chan_lookup_handle(hdev, handle);
2501 if (chan)
2502 return chan->conn;
2503 break;
2504 default:
2505 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2506 break;
2507 }
2508
2509 return NULL;
2510 }
2511
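/* Number Of Completed Data Blocks event (block-based flow control,
 * used by AMP controllers): returns transmit credits in units of
 * data blocks rather than packets, clamped to num_blocks */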
2512 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2513 {
2514 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2515 int i;
2516
2517 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2518 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2519 return;
2520 }
2521
2522 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2523 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2524 BT_DBG("%s bad parameters", hdev->name);
2525 return;
2526 }
2527
2528 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2529 ev->num_hndl);
2530
2531 for (i = 0; i < ev->num_hndl; i++) {
2532 struct hci_comp_blocks_info *info = &ev->handles[i];
2533 struct hci_conn *conn = NULL;
2534 __u16 handle, block_count;
2535
2536 handle = __le16_to_cpu(info->handle);
2537 block_count = __le16_to_cpu(info->blocks);
2538
2539 conn = __hci_conn_lookup_handle(hdev, handle);
2540 if (!conn)
2541 continue;
2542
2543 conn->sent -= block_count;
2544
2545 switch (conn->type) {
2546 case ACL_LINK:
2547 case AMP_LINK:
2548 hdev->block_cnt += block_count;
2549 if (hdev->block_cnt > hdev->num_blocks)
2550 hdev->block_cnt = hdev->num_blocks;
2551 break;
2552
2553 default:
2554 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2555 break;
2556 }
2557 }
2558
2559 queue_work(hdev->workqueue, &hdev->tx_work);
2560 }
2561
2562 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2563 {
2564 struct hci_ev_mode_change *ev = (void *) skb->data;
2565 struct hci_conn *conn;
2566
2567 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2568
2569 hci_dev_lock(hdev);
2570
2571 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2572 if (conn) {
2573 conn->mode = ev->mode;
2574 conn->interval = __le16_to_cpu(ev->interval);
2575
2576 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2577 &conn->flags)) {
2578 if (conn->mode == HCI_CM_ACTIVE)
2579 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2580 else
2581 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2582 }
2583
2584 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2585 hci_sco_setup(conn, ev->status);
2586 }
2587
2588 hci_dev_unlock(hdev);
2589 }
2590
2591 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2592 {
2593 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2594 struct hci_conn *conn;
2595
2596 BT_DBG("%s", hdev->name);
2597
2598 hci_dev_lock(hdev);
2599
2600 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2601 if (!conn)
2602 goto unlock;
2603
2604 if (conn->state == BT_CONNECTED) {
2605 hci_conn_hold(conn);
2606 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2607 hci_conn_put(conn);
2608 }
2609
2610 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2611 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2612 sizeof(ev->bdaddr), &ev->bdaddr);
2613 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2614 u8 secure;
2615
2616 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2617 secure = 1;
2618 else
2619 secure = 0;
2620
2621 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2622 }
2623
2624 unlock:
2625 hci_dev_unlock(hdev);
2626 }
2627
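/* Link Key Request event: reply with a stored key for the peer
 * address unless policy forbids it - debug keys are only used when
 * HCI_DEBUG_KEYS is set, unauthenticated keys are rejected when the
 * connection asked for MITM protection, and combination keys from a
 * short PIN are rejected for BT_SECURITY_HIGH.  Otherwise a negative
 * reply is sent */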
2628 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2629 {
2630 struct hci_ev_link_key_req *ev = (void *) skb->data;
2631 struct hci_cp_link_key_reply cp;
2632 struct hci_conn *conn;
2633 struct link_key *key;
2634
2635 BT_DBG("%s", hdev->name);
2636
2637 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2638 return;
2639
2640 hci_dev_lock(hdev);
2641
2642 key = hci_find_link_key(hdev, &ev->bdaddr);
2643 if (!key) {
2644 BT_DBG("%s link key not found for %pMR", hdev->name,
2645 &ev->bdaddr);
2646 goto not_found;
2647 }
2648
2649 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2650 &ev->bdaddr);
2651
2652 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2653 key->type == HCI_LK_DEBUG_COMBINATION) {
2654 BT_DBG("%s ignoring debug key", hdev->name);
2655 goto not_found;
2656 }
2657
2658 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2659 if (conn) {
2660 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2661 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2662 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2663 goto not_found;
2664 }
2665
2666 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2667 conn->pending_sec_level == BT_SECURITY_HIGH) {
2668 			BT_DBG("%s ignoring unauthenticated key for high security",

2669 hdev->name);
2670 goto not_found;
2671 }
2672
2673 conn->key_type = key->type;
2674 conn->pin_length = key->pin_len;
2675 }
2676
2677 bacpy(&cp.bdaddr, &ev->bdaddr);
2678 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2679
2680 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2681
2682 hci_dev_unlock(hdev);
2683
2684 return;
2685
2686 not_found:
2687 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2688 hci_dev_unlock(hdev);
2689 }
2690
2691 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2692 {
2693 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2694 struct hci_conn *conn;
2695 u8 pin_len = 0;
2696
2697 BT_DBG("%s", hdev->name);
2698
2699 hci_dev_lock(hdev);
2700
2701 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2702 if (conn) {
2703 hci_conn_hold(conn);
2704 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2705 pin_len = conn->pin_length;
2706
2707 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2708 conn->key_type = ev->key_type;
2709
2710 hci_conn_put(conn);
2711 }
2712
2713 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2714 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2715 ev->key_type, pin_len);
2716
2717 hci_dev_unlock(hdev);
2718 }
2719
2720 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2721 {
2722 struct hci_ev_clock_offset *ev = (void *) skb->data;
2723 struct hci_conn *conn;
2724
2725 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2726
2727 hci_dev_lock(hdev);
2728
2729 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2730 if (conn && !ev->status) {
2731 struct inquiry_entry *ie;
2732
2733 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2734 if (ie) {
2735 ie->data.clock_offset = ev->clock_offset;
2736 ie->timestamp = jiffies;
2737 }
2738 }
2739
2740 hci_dev_unlock(hdev);
2741 }
2742
2743 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2744 {
2745 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2746 struct hci_conn *conn;
2747
2748 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2749
2750 hci_dev_lock(hdev);
2751
2752 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2753 if (conn && !ev->status)
2754 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2755
2756 hci_dev_unlock(hdev);
2757 }
2758
2759 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2760 {
2761 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2762 struct inquiry_entry *ie;
2763
2764 BT_DBG("%s", hdev->name);
2765
2766 hci_dev_lock(hdev);
2767
2768 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2769 if (ie) {
2770 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2771 ie->timestamp = jiffies;
2772 }
2773
2774 hci_dev_unlock(hdev);
2775 }
2776
2777 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2778 struct sk_buff *skb)
2779 {
2780 struct inquiry_data data;
2781 int num_rsp = *((__u8 *) skb->data);
2782 bool name_known, ssp;
2783
2784 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2785
2786 if (!num_rsp)
2787 return;
2788
2789 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2790 return;
2791
2792 hci_dev_lock(hdev);
2793
2794 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2795 struct inquiry_info_with_rssi_and_pscan_mode *info;
2796 info = (void *) (skb->data + 1);
2797
2798 for (; num_rsp; num_rsp--, info++) {
2799 bacpy(&data.bdaddr, &info->bdaddr);
2800 data.pscan_rep_mode = info->pscan_rep_mode;
2801 data.pscan_period_mode = info->pscan_period_mode;
2802 data.pscan_mode = info->pscan_mode;
2803 memcpy(data.dev_class, info->dev_class, 3);
2804 data.clock_offset = info->clock_offset;
2805 data.rssi = info->rssi;
2806 data.ssp_mode = 0x00;
2807
2808 name_known = hci_inquiry_cache_update(hdev, &data,
2809 false, &ssp);
2810 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2811 info->dev_class, info->rssi,
2812 !name_known, ssp, NULL, 0);
2813 }
2814 } else {
2815 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2816
2817 for (; num_rsp; num_rsp--, info++) {
2818 bacpy(&data.bdaddr, &info->bdaddr);
2819 data.pscan_rep_mode = info->pscan_rep_mode;
2820 data.pscan_period_mode = info->pscan_period_mode;
2821 data.pscan_mode = 0x00;
2822 memcpy(data.dev_class, info->dev_class, 3);
2823 data.clock_offset = info->clock_offset;
2824 data.rssi = info->rssi;
2825 data.ssp_mode = 0x00;
2826 name_known = hci_inquiry_cache_update(hdev, &data,
2827 false, &ssp);
2828 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2829 info->dev_class, info->rssi,
2830 !name_known, ssp, NULL, 0);
2831 }
2832 }
2833
2834 hci_dev_unlock(hdev);
2835 }
2836
2837 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2838 struct sk_buff *skb)
2839 {
2840 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2841 struct hci_conn *conn;
2842
2843 BT_DBG("%s", hdev->name);
2844
2845 hci_dev_lock(hdev);
2846
2847 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2848 if (!conn)
2849 goto unlock;
2850
2851 if (!ev->status && ev->page == 0x01) {
2852 struct inquiry_entry *ie;
2853
2854 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2855 if (ie)
2856 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2857
2858 if (ev->features[0] & LMP_HOST_SSP)
2859 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2860 }
2861
2862 if (conn->state != BT_CONFIG)
2863 goto unlock;
2864
2865 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2866 struct hci_cp_remote_name_req cp;
2867 memset(&cp, 0, sizeof(cp));
2868 bacpy(&cp.bdaddr, &conn->dst);
2869 cp.pscan_rep_mode = 0x02;
2870 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2871 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2872 mgmt_device_connected(hdev, &conn->dst, conn->type,
2873 conn->dst_type, 0, NULL, 0,
2874 conn->dev_class);
2875
2876 if (!hci_outgoing_auth_needed(hdev, conn)) {
2877 conn->state = BT_CONNECTED;
2878 hci_proto_connect_cfm(conn, ev->status);
2879 hci_conn_put(conn);
2880 }
2881
2882 unlock:
2883 hci_dev_unlock(hdev);
2884 }
2885
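/* Synchronous Connection Complete event: a pending eSCO connection
 * may be downgraded to SCO if the controller reports it on the SCO
 * link type.  For a small set of failure codes (0x11, 0x1a, 0x1c,
 * 0x1f) an outgoing connection is retried once with a packet type
 * restricted to the locally supported (e)SCO/EDR mask before giving
 * up */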
2886 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2887 struct sk_buff *skb)
2888 {
2889 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2890 struct hci_conn *conn;
2891
2892 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2893
2894 hci_dev_lock(hdev);
2895
2896 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2897 if (!conn) {
2898 if (ev->link_type == ESCO_LINK)
2899 goto unlock;
2900
2901 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2902 if (!conn)
2903 goto unlock;
2904
2905 conn->type = SCO_LINK;
2906 }
2907
2908 switch (ev->status) {
2909 case 0x00:
2910 conn->handle = __le16_to_cpu(ev->handle);
2911 conn->state = BT_CONNECTED;
2912
2913 hci_conn_hold_device(conn);
2914 hci_conn_add_sysfs(conn);
2915 break;
2916
2917 case 0x11: /* Unsupported Feature or Parameter Value */
2918 case 0x1c: /* SCO interval rejected */
2919 case 0x1a: /* Unsupported Remote Feature */
2920 case 0x1f: /* Unspecified error */
2921 if (conn->out && conn->attempt < 2) {
2922 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2923 (hdev->esco_type & EDR_ESCO_MASK);
2924 hci_setup_sync(conn, conn->link->handle);
2925 goto unlock;
2926 }
2927 /* fall through */
2928
2929 default:
2930 conn->state = BT_CLOSED;
2931 break;
2932 }
2933
2934 hci_proto_connect_cfm(conn, ev->status);
2935 if (ev->status)
2936 hci_conn_del(conn);
2937
2938 unlock:
2939 hci_dev_unlock(hdev);
2940 }
2941
2942 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2943 struct sk_buff *skb)
2944 {
2945 struct inquiry_data data;
2946 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2947 int num_rsp = *((__u8 *) skb->data);
2948 size_t eir_len;
2949
2950 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2951
2952 if (!num_rsp)
2953 return;
2954
2955 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2956 return;
2957
2958 hci_dev_lock(hdev);
2959
2960 for (; num_rsp; num_rsp--, info++) {
2961 bool name_known, ssp;
2962
2963 bacpy(&data.bdaddr, &info->bdaddr);
2964 data.pscan_rep_mode = info->pscan_rep_mode;
2965 data.pscan_period_mode = info->pscan_period_mode;
2966 data.pscan_mode = 0x00;
2967 memcpy(data.dev_class, info->dev_class, 3);
2968 data.clock_offset = info->clock_offset;
2969 data.rssi = info->rssi;
2970 data.ssp_mode = 0x01;
2971
2972 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2973 name_known = eir_has_data_type(info->data,
2974 sizeof(info->data),
2975 EIR_NAME_COMPLETE);
2976 else
2977 name_known = true;
2978
2979 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2980 &ssp);
2981 eir_len = eir_get_length(info->data, sizeof(info->data));
2982 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2983 info->dev_class, info->rssi, !name_known,
2984 ssp, info->data, eir_len);
2985 }
2986
2987 hci_dev_unlock(hdev);
2988 }
2989
2990 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
2991 struct sk_buff *skb)
2992 {
2993 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
2994 struct hci_conn *conn;
2995
2996 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
2997 __le16_to_cpu(ev->handle));
2998
2999 hci_dev_lock(hdev);
3000
3001 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3002 if (!conn)
3003 goto unlock;
3004
3005 if (!ev->status)
3006 conn->sec_level = conn->pending_sec_level;
3007
3008 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3009
3010 if (ev->status && conn->state == BT_CONNECTED) {
3011 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3012 hci_conn_put(conn);
3013 goto unlock;
3014 }
3015
3016 if (conn->state == BT_CONFIG) {
3017 if (!ev->status)
3018 conn->state = BT_CONNECTED;
3019
3020 hci_proto_connect_cfm(conn, ev->status);
3021 hci_conn_put(conn);
3022 } else {
3023 hci_auth_cfm(conn, ev->status);
3024
3025 hci_conn_hold(conn);
3026 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3027 hci_conn_put(conn);
3028 }
3029
3030 unlock:
3031 hci_dev_unlock(hdev);
3032 }
3033
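/* Pick the Authentication Requirements value for an IO Capability
 * reply.  Per the core specification the encoding is: 0x00/0x01 no
 * bonding, 0x02/0x03 dedicated bonding, 0x04/0x05 general bonding,
 * with the low bit selecting MITM protection */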
3034 static u8 hci_get_auth_req(struct hci_conn *conn)
3035 {
3036 /* If remote requests dedicated bonding follow that lead */
3037 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3038 /* If both remote and local IO capabilities allow MITM
3039 * protection then require it, otherwise don't */
3040 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3041 return 0x02;
3042 else
3043 return 0x03;
3044 }
3045
3046 /* If remote requests no-bonding follow that lead */
3047 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3048 return conn->remote_auth | (conn->auth_type & 0x01);
3049
3050 return conn->auth_type;
3051 }
3052
3053 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3054 {
3055 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3056 struct hci_conn *conn;
3057
3058 BT_DBG("%s", hdev->name);
3059
3060 hci_dev_lock(hdev);
3061
3062 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3063 if (!conn)
3064 goto unlock;
3065
3066 hci_conn_hold(conn);
3067
3068 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3069 goto unlock;
3070
3071 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3072 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3073 struct hci_cp_io_capability_reply cp;
3074
3075 bacpy(&cp.bdaddr, &ev->bdaddr);
3076 		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3077 		 * since KeyboardDisplay is not a valid BR/EDR value in the spec. */
3078 cp.capability = (conn->io_capability == 0x04) ?
3079 0x01 : conn->io_capability;
3080 conn->auth_type = hci_get_auth_req(conn);
3081 cp.authentication = conn->auth_type;
3082
3083 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3084 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3085 cp.oob_data = 0x01;
3086 else
3087 cp.oob_data = 0x00;
3088
3089 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3090 sizeof(cp), &cp);
3091 } else {
3092 struct hci_cp_io_capability_neg_reply cp;
3093
3094 bacpy(&cp.bdaddr, &ev->bdaddr);
3095 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3096
3097 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3098 sizeof(cp), &cp);
3099 }
3100
3101 unlock:
3102 hci_dev_unlock(hdev);
3103 }
3104
3105 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3106 {
3107 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3108 struct hci_conn *conn;
3109
3110 BT_DBG("%s", hdev->name);
3111
3112 hci_dev_lock(hdev);
3113
3114 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3115 if (!conn)
3116 goto unlock;
3117
3118 conn->remote_cap = ev->capability;
3119 conn->remote_auth = ev->authentication;
3120 if (ev->oob_data)
3121 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3122
3123 unlock:
3124 hci_dev_unlock(hdev);
3125 }
3126
3127 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3128 struct sk_buff *skb)
3129 {
3130 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3131 int loc_mitm, rem_mitm, confirm_hint = 0;
3132 struct hci_conn *conn;
3133
3134 BT_DBG("%s", hdev->name);
3135
3136 hci_dev_lock(hdev);
3137
3138 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3139 goto unlock;
3140
3141 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3142 if (!conn)
3143 goto unlock;
3144
3145 loc_mitm = (conn->auth_type & 0x01);
3146 rem_mitm = (conn->remote_auth & 0x01);
3147
3148 /* If we require MITM but the remote device can't provide that
3149 * (it has NoInputNoOutput) then reject the confirmation
3150 * request. The only exception is when we're dedicated bonding
3151 * initiators (connect_cfm_cb set) since then we always have the MITM
3152 * bit set. */
3153 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3154 BT_DBG("Rejecting request: remote device can't provide MITM");
3155 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3156 sizeof(ev->bdaddr), &ev->bdaddr);
3157 goto unlock;
3158 }
3159
3160 	/* If neither side requires MITM protection, auto-accept */
3161 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3162 (!rem_mitm || conn->io_capability == 0x03)) {
3163
3164 		/* If we're not the initiators, request authorization to
3165 		 * proceed from user space (mgmt_user_confirm with
3166 		 * confirm_hint set to 1). */
3167 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3168 BT_DBG("Confirming auto-accept as acceptor");
3169 confirm_hint = 1;
3170 goto confirm;
3171 }
3172
3173 BT_DBG("Auto-accept of user confirmation with %ums delay",
3174 hdev->auto_accept_delay);
3175
3176 if (hdev->auto_accept_delay > 0) {
3177 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3178 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3179 goto unlock;
3180 }
3181
3182 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3183 sizeof(ev->bdaddr), &ev->bdaddr);
3184 goto unlock;
3185 }
3186
3187 confirm:
3188 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3189 confirm_hint);
3190
3191 unlock:
3192 hci_dev_unlock(hdev);
3193 }
3194
3195 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3196 struct sk_buff *skb)
3197 {
3198 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3199
3200 BT_DBG("%s", hdev->name);
3201
3202 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3203 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3204 }
3205
3206 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3207 struct sk_buff *skb)
3208 {
3209 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3210 struct hci_conn *conn;
3211
3212 BT_DBG("%s", hdev->name);
3213
3214 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3215 if (!conn)
3216 return;
3217
3218 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3219 conn->passkey_entered = 0;
3220
3221 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3222 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3223 conn->dst_type, conn->passkey_notify,
3224 conn->passkey_entered);
3225 }
3226
3227 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3228 {
3229 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3230 struct hci_conn *conn;
3231
3232 BT_DBG("%s", hdev->name);
3233
3234 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3235 if (!conn)
3236 return;
3237
3238 switch (ev->type) {
3239 case HCI_KEYPRESS_STARTED:
3240 conn->passkey_entered = 0;
3241 return;
3242
3243 case HCI_KEYPRESS_ENTERED:
3244 conn->passkey_entered++;
3245 break;
3246
3247 case HCI_KEYPRESS_ERASED:
3248 conn->passkey_entered--;
3249 break;
3250
3251 case HCI_KEYPRESS_CLEARED:
3252 conn->passkey_entered = 0;
3253 break;
3254
3255 case HCI_KEYPRESS_COMPLETED:
3256 return;
3257 }
3258
3259 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3260 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3261 conn->dst_type, conn->passkey_notify,
3262 conn->passkey_entered);
3263 }
3264
3265 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3266 struct sk_buff *skb)
3267 {
3268 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3269 struct hci_conn *conn;
3270
3271 BT_DBG("%s", hdev->name);
3272
3273 hci_dev_lock(hdev);
3274
3275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3276 if (!conn)
3277 goto unlock;
3278
3279 	/* To avoid duplicate auth_failed events to user space we check
3280 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3281 	 * initiated the authentication. A traditional auth_complete
3282 	 * event is always produced when we are the initiator and is
3283 	 * also mapped to the mgmt_auth_failed event */
3284 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3285 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3286 ev->status);
3287
3288 hci_conn_put(conn);
3289
3290 unlock:
3291 hci_dev_unlock(hdev);
3292 }
3293
3294 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3295 struct sk_buff *skb)
3296 {
3297 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3298 struct inquiry_entry *ie;
3299
3300 BT_DBG("%s", hdev->name);
3301
3302 hci_dev_lock(hdev);
3303
3304 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3305 if (ie)
3306 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3307
3308 hci_dev_unlock(hdev);
3309 }
3310
3311 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3312 struct sk_buff *skb)
3313 {
3314 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3315 struct oob_data *data;
3316
3317 BT_DBG("%s", hdev->name);
3318
3319 hci_dev_lock(hdev);
3320
3321 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3322 goto unlock;
3323
3324 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3325 if (data) {
3326 struct hci_cp_remote_oob_data_reply cp;
3327
3328 bacpy(&cp.bdaddr, &ev->bdaddr);
3329 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3330 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3331
3332 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3333 &cp);
3334 } else {
3335 struct hci_cp_remote_oob_data_neg_reply cp;
3336
3337 bacpy(&cp.bdaddr, &ev->bdaddr);
3338 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3339 &cp);
3340 }
3341
3342 unlock:
3343 hci_dev_unlock(hdev);
3344 }
3345
3346 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3347 struct sk_buff *skb)
3348 {
3349 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3350 struct hci_conn *hcon, *bredr_hcon;
3351
3352 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3353 ev->status);
3354
3355 hci_dev_lock(hdev);
3356
3357 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3358 if (!hcon) {
3359 hci_dev_unlock(hdev);
3360 return;
3361 }
3362
3363 if (ev->status) {
3364 hci_conn_del(hcon);
3365 hci_dev_unlock(hdev);
3366 return;
3367 }
3368
3369 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3370
3371 hcon->state = BT_CONNECTED;
3372 bacpy(&hcon->dst, &bredr_hcon->dst);
3373
3374 hci_conn_hold(hcon);
3375 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3376 hci_conn_put(hcon);
3377
3378 hci_conn_hold_device(hcon);
3379 hci_conn_add_sysfs(hcon);
3380
3381 amp_physical_cfm(bredr_hcon, hcon);
3382
3383 hci_dev_unlock(hdev);
3384 }
3385
3386 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3387 {
3388 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3389 struct hci_conn *hcon;
3390 struct hci_chan *hchan;
3391 struct amp_mgr *mgr;
3392
3393 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3394 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3395 ev->status);
3396
3397 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3398 if (!hcon)
3399 return;
3400
3401 /* Create AMP hchan */
3402 hchan = hci_chan_create(hcon);
3403 if (!hchan)
3404 return;
3405
3406 hchan->handle = le16_to_cpu(ev->handle);
3407
3408 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3409
3410 mgr = hcon->amp_mgr;
3411 if (mgr && mgr->bredr_chan) {
3412 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3413
3414 l2cap_chan_lock(bredr_chan);
3415
3416 bredr_chan->conn->mtu = hdev->block_mtu;
3417 l2cap_logical_cfm(bredr_chan, hchan, 0);
3418 hci_conn_hold(hcon);
3419
3420 l2cap_chan_unlock(bredr_chan);
3421 }
3422 }
3423
3424 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3425 struct sk_buff *skb)
3426 {
3427 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3428 struct hci_chan *hchan;
3429
3430 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3431 le16_to_cpu(ev->handle), ev->status);
3432
3433 if (ev->status)
3434 return;
3435
3436 hci_dev_lock(hdev);
3437
3438 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3439 if (!hchan)
3440 goto unlock;
3441
3442 amp_destroy_logical_link(hchan, ev->reason);
3443
3444 unlock:
3445 hci_dev_unlock(hdev);
3446 }
3447
3448 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3449 struct sk_buff *skb)
3450 {
3451 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3452 struct hci_conn *hcon;
3453
3454 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3455
3456 if (ev->status)
3457 return;
3458
3459 hci_dev_lock(hdev);
3460
3461 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3462 if (hcon) {
3463 hcon->state = BT_CLOSED;
3464 hci_conn_del(hcon);
3465 }
3466
3467 hci_dev_unlock(hdev);
3468 }
3469
3470 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3471 {
3472 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3473 struct hci_conn *conn;
3474
3475 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3476
3477 hci_dev_lock(hdev);
3478
3479 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3480 if (!conn) {
3481 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3482 if (!conn) {
3483 BT_ERR("No memory for new connection");
3484 goto unlock;
3485 }
3486
3487 conn->dst_type = ev->bdaddr_type;
3488
3489 if (ev->role == LE_CONN_ROLE_MASTER) {
3490 conn->out = true;
3491 conn->link_mode |= HCI_LM_MASTER;
3492 }
3493 }
3494
3495 if (ev->status) {
3496 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3497 conn->dst_type, ev->status);
3498 hci_proto_connect_cfm(conn, ev->status);
3499 conn->state = BT_CLOSED;
3500 hci_conn_del(conn);
3501 goto unlock;
3502 }
3503
3504 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3505 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3506 conn->dst_type, 0, NULL, 0, NULL);
3507
3508 conn->sec_level = BT_SECURITY_LOW;
3509 conn->handle = __le16_to_cpu(ev->handle);
3510 conn->state = BT_CONNECTED;
3511
3512 hci_conn_hold_device(conn);
3513 hci_conn_add_sysfs(conn);
3514
3515 hci_proto_connect_cfm(conn, ev->status);
3516
3517 unlock:
3518 hci_dev_unlock(hdev);
3519 }
3520
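/* LE Advertising Report event: the report count is followed by
 * num_reports variable-length entries, each ending with one RSSI
 * byte after "length" bytes of advertising data - hence the
 * ev->data[ev->length] read and the "+ 1" when advancing ptr */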
3521 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3522 {
3523 u8 num_reports = skb->data[0];
3524 void *ptr = &skb->data[1];
3525 s8 rssi;
3526
3527 while (num_reports--) {
3528 struct hci_ev_le_advertising_info *ev = ptr;
3529
3530 rssi = ev->data[ev->length];
3531 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3532 NULL, rssi, 0, 1, ev->data, ev->length);
3533
3534 ptr += sizeof(*ev) + ev->length + 1;
3535 }
3536 }
3537
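/* LE Long Term Key Request event: look up an LTK by EDIV/Rand and
 * hand it to the controller, raising the security level for
 * authenticated keys.  Short term keys (HCI_SMP_STK) are single-use
 * and are deleted once handed over; if no key is found a negative
 * reply is sent */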
3538 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3539 {
3540 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3541 struct hci_cp_le_ltk_reply cp;
3542 struct hci_cp_le_ltk_neg_reply neg;
3543 struct hci_conn *conn;
3544 struct smp_ltk *ltk;
3545
3546 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3547
3548 hci_dev_lock(hdev);
3549
3550 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3551 if (conn == NULL)
3552 goto not_found;
3553
3554 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3555 if (ltk == NULL)
3556 goto not_found;
3557
3558 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3559 cp.handle = cpu_to_le16(conn->handle);
3560
3561 if (ltk->authenticated)
3562 conn->sec_level = BT_SECURITY_HIGH;
3563
3564 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3565
3566 if (ltk->type & HCI_SMP_STK) {
3567 list_del(&ltk->list);
3568 kfree(ltk);
3569 }
3570
3571 hci_dev_unlock(hdev);
3572
3573 return;
3574
3575 not_found:
3576 neg.handle = ev->handle;
3577 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3578 hci_dev_unlock(hdev);
3579 }
3580
3581 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3582 {
3583 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3584
3585 skb_pull(skb, sizeof(*le_ev));
3586
3587 switch (le_ev->subevent) {
3588 case HCI_EV_LE_CONN_COMPLETE:
3589 hci_le_conn_complete_evt(hdev, skb);
3590 break;
3591
3592 case HCI_EV_LE_ADVERTISING_REPORT:
3593 hci_le_adv_report_evt(hdev, skb);
3594 break;
3595
3596 case HCI_EV_LE_LTK_REQ:
3597 hci_le_ltk_request_evt(hdev, skb);
3598 break;
3599
3600 default:
3601 break;
3602 }
3603 }
3604
3605 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3606 {
3607 struct hci_ev_channel_selected *ev = (void *) skb->data;
3608 struct hci_conn *hcon;
3609
3610 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3611
3612 skb_pull(skb, sizeof(*ev));
3613
3614 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3615 if (!hcon)
3616 return;
3617
3618 amp_read_loc_assoc_final_data(hdev, hcon);
3619 }
3620
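/* Main HCI event demultiplexer: strips the event header and hands
 * the remaining payload to the per-event handler, then frees the skb
 * and updates the RX event statistics */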
3621 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3622 {
3623 struct hci_event_hdr *hdr = (void *) skb->data;
3624 __u8 event = hdr->evt;
3625
3626 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3627
3628 switch (event) {
3629 case HCI_EV_INQUIRY_COMPLETE:
3630 hci_inquiry_complete_evt(hdev, skb);
3631 break;
3632
3633 case HCI_EV_INQUIRY_RESULT:
3634 hci_inquiry_result_evt(hdev, skb);
3635 break;
3636
3637 case HCI_EV_CONN_COMPLETE:
3638 hci_conn_complete_evt(hdev, skb);
3639 break;
3640
3641 case HCI_EV_CONN_REQUEST:
3642 hci_conn_request_evt(hdev, skb);
3643 break;
3644
3645 case HCI_EV_DISCONN_COMPLETE:
3646 hci_disconn_complete_evt(hdev, skb);
3647 break;
3648
3649 case HCI_EV_AUTH_COMPLETE:
3650 hci_auth_complete_evt(hdev, skb);
3651 break;
3652
3653 case HCI_EV_REMOTE_NAME:
3654 hci_remote_name_evt(hdev, skb);
3655 break;
3656
3657 case HCI_EV_ENCRYPT_CHANGE:
3658 hci_encrypt_change_evt(hdev, skb);
3659 break;
3660
3661 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3662 hci_change_link_key_complete_evt(hdev, skb);
3663 break;
3664
3665 case HCI_EV_REMOTE_FEATURES:
3666 hci_remote_features_evt(hdev, skb);
3667 break;
3668
3669 case HCI_EV_CMD_COMPLETE:
3670 hci_cmd_complete_evt(hdev, skb);
3671 break;
3672
3673 case HCI_EV_CMD_STATUS:
3674 hci_cmd_status_evt(hdev, skb);
3675 break;
3676
3677 case HCI_EV_ROLE_CHANGE:
3678 hci_role_change_evt(hdev, skb);
3679 break;
3680
3681 case HCI_EV_NUM_COMP_PKTS:
3682 hci_num_comp_pkts_evt(hdev, skb);
3683 break;
3684
3685 case HCI_EV_MODE_CHANGE:
3686 hci_mode_change_evt(hdev, skb);
3687 break;
3688
3689 case HCI_EV_PIN_CODE_REQ:
3690 hci_pin_code_request_evt(hdev, skb);
3691 break;
3692
3693 case HCI_EV_LINK_KEY_REQ:
3694 hci_link_key_request_evt(hdev, skb);
3695 break;
3696
3697 case HCI_EV_LINK_KEY_NOTIFY:
3698 hci_link_key_notify_evt(hdev, skb);
3699 break;
3700
3701 case HCI_EV_CLOCK_OFFSET:
3702 hci_clock_offset_evt(hdev, skb);
3703 break;
3704
3705 case HCI_EV_PKT_TYPE_CHANGE:
3706 hci_pkt_type_change_evt(hdev, skb);
3707 break;
3708
3709 case HCI_EV_PSCAN_REP_MODE:
3710 hci_pscan_rep_mode_evt(hdev, skb);
3711 break;
3712
3713 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3714 hci_inquiry_result_with_rssi_evt(hdev, skb);
3715 break;
3716
3717 case HCI_EV_REMOTE_EXT_FEATURES:
3718 hci_remote_ext_features_evt(hdev, skb);
3719 break;
3720
3721 case HCI_EV_SYNC_CONN_COMPLETE:
3722 hci_sync_conn_complete_evt(hdev, skb);
3723 break;
3724
3725 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3726 hci_extended_inquiry_result_evt(hdev, skb);
3727 break;
3728
3729 case HCI_EV_KEY_REFRESH_COMPLETE:
3730 hci_key_refresh_complete_evt(hdev, skb);
3731 break;
3732
3733 case HCI_EV_IO_CAPA_REQUEST:
3734 hci_io_capa_request_evt(hdev, skb);
3735 break;
3736
3737 case HCI_EV_IO_CAPA_REPLY:
3738 hci_io_capa_reply_evt(hdev, skb);
3739 break;
3740
3741 case HCI_EV_USER_CONFIRM_REQUEST:
3742 hci_user_confirm_request_evt(hdev, skb);
3743 break;
3744
3745 case HCI_EV_USER_PASSKEY_REQUEST:
3746 hci_user_passkey_request_evt(hdev, skb);
3747 break;
3748
3749 case HCI_EV_USER_PASSKEY_NOTIFY:
3750 hci_user_passkey_notify_evt(hdev, skb);
3751 break;
3752
3753 case HCI_EV_KEYPRESS_NOTIFY:
3754 hci_keypress_notify_evt(hdev, skb);
3755 break;
3756
3757 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3758 hci_simple_pair_complete_evt(hdev, skb);
3759 break;
3760
3761 case HCI_EV_REMOTE_HOST_FEATURES:
3762 hci_remote_host_features_evt(hdev, skb);
3763 break;
3764
3765 case HCI_EV_LE_META:
3766 hci_le_meta_evt(hdev, skb);
3767 break;
3768
3769 case HCI_EV_CHANNEL_SELECTED:
3770 hci_chan_selected_evt(hdev, skb);
3771 break;
3772
3773 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3774 hci_remote_oob_data_request_evt(hdev, skb);
3775 break;
3776
3777 case HCI_EV_PHY_LINK_COMPLETE:
3778 hci_phy_link_complete_evt(hdev, skb);
3779 break;
3780
3781 case HCI_EV_LOGICAL_LINK_COMPLETE:
3782 hci_loglink_complete_evt(hdev, skb);
3783 break;
3784
3785 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3786 hci_disconn_loglink_complete_evt(hdev, skb);
3787 break;
3788
3789 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3790 hci_disconn_phylink_complete_evt(hdev, skb);
3791 break;
3792
3793 case HCI_EV_NUM_COMP_BLOCKS:
3794 hci_num_comp_blocks_evt(hdev, skb);
3795 break;
3796
3797 default:
3798 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3799 break;
3800 }
3801
3802 kfree_skb(skb);
3803 hdev->stat.evt_rx++;
3804 }