Bluetooth: Remove useless HCI_PENDING_CLASS flag
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
37 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
38 {
39 __u8 status = *((__u8 *) skb->data);
40
41 BT_DBG("%s status 0x%2.2x", hdev->name, status);
42
43 if (status) {
44 hci_dev_lock(hdev);
45 mgmt_stop_discovery_failed(hdev, status);
46 hci_dev_unlock(hdev);
47 return;
48 }
49
50 clear_bit(HCI_INQUIRY, &hdev->flags);
51
52 hci_dev_lock(hdev);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54 hci_dev_unlock(hdev);
55
56 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
57
58 hci_conn_check_pending(hdev);
59 }
60
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85 }
86
87 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 struct sk_buff *skb)
89 {
90 BT_DBG("%s", hdev->name);
91 }
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157 }
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161 {
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166 if (rp->status)
167 return;
168
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
171
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
174 {
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
177
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
183
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 /* Reset all non-persistent flags */
197 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ));
198
199 hdev->discovery.state = DISCOVERY_STOPPED;
200 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
201 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
202
203 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
204 hdev->adv_data_len = 0;
205 }
206
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
208 {
209 __u8 status = *((__u8 *) skb->data);
210 void *sent;
211
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
213
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
215 if (!sent)
216 return;
217
218 hci_dev_lock(hdev);
219
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
222 else if (!status)
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
224
225 hci_dev_unlock(hdev);
226
227 if (!status && !test_bit(HCI_INIT, &hdev->flags))
228 hci_update_ad(hdev);
229 }
230
231 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
232 {
233 struct hci_rp_read_local_name *rp = (void *) skb->data;
234
235 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
236
237 if (rp->status)
238 return;
239
240 if (test_bit(HCI_SETUP, &hdev->dev_flags))
241 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
242 }
243
244 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
245 {
246 __u8 status = *((__u8 *) skb->data);
247 void *sent;
248
249 BT_DBG("%s status 0x%2.2x", hdev->name, status);
250
251 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
252 if (!sent)
253 return;
254
255 if (!status) {
256 __u8 param = *((__u8 *) sent);
257
258 if (param == AUTH_ENABLED)
259 set_bit(HCI_AUTH, &hdev->flags);
260 else
261 clear_bit(HCI_AUTH, &hdev->flags);
262 }
263
264 if (test_bit(HCI_MGMT, &hdev->dev_flags))
265 mgmt_auth_enable_complete(hdev, status);
266 }
267
268 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
269 {
270 __u8 status = *((__u8 *) skb->data);
271 void *sent;
272
273 BT_DBG("%s status 0x%2.2x", hdev->name, status);
274
275 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
276 if (!sent)
277 return;
278
279 if (!status) {
280 __u8 param = *((__u8 *) sent);
281
282 if (param)
283 set_bit(HCI_ENCRYPT, &hdev->flags);
284 else
285 clear_bit(HCI_ENCRYPT, &hdev->flags);
286 }
287 }
288
289 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
290 {
291 __u8 param, status = *((__u8 *) skb->data);
292 int old_pscan, old_iscan;
293 void *sent;
294
295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
296
297 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
298 if (!sent)
299 return;
300
301 param = *((__u8 *) sent);
302
303 hci_dev_lock(hdev);
304
305 if (status) {
306 mgmt_write_scan_failed(hdev, param, status);
307 hdev->discov_timeout = 0;
308 goto done;
309 }
310
311 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
312 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
313
314 if (param & SCAN_INQUIRY) {
315 set_bit(HCI_ISCAN, &hdev->flags);
316 if (!old_iscan)
317 mgmt_discoverable(hdev, 1);
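/* discov_timeout is kept in seconds; convert it and arm the delayed work that turns discoverable mode back off. */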
318 if (hdev->discov_timeout > 0) {
319 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
320 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
321 to);
322 }
323 } else if (old_iscan)
324 mgmt_discoverable(hdev, 0);
325
326 if (param & SCAN_PAGE) {
327 set_bit(HCI_PSCAN, &hdev->flags);
328 if (!old_pscan)
329 mgmt_connectable(hdev, 1);
330 } else if (old_pscan)
331 mgmt_connectable(hdev, 0);
332
333 done:
334 hci_dev_unlock(hdev);
335 }
336
337 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
338 {
339 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
340
341 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
342
343 if (rp->status)
344 return;
345
346 memcpy(hdev->dev_class, rp->dev_class, 3);
347
348 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
349 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
350 }
351
352 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
353 {
354 __u8 status = *((__u8 *) skb->data);
355 void *sent;
356
357 BT_DBG("%s status 0x%2.2x", hdev->name, status);
358
359 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
360 if (!sent)
361 return;
362
363 hci_dev_lock(hdev);
364
365 if (status == 0)
366 memcpy(hdev->dev_class, sent, 3);
367
368 if (test_bit(HCI_MGMT, &hdev->dev_flags))
369 mgmt_set_class_of_dev_complete(hdev, sent, status);
370
371 hci_dev_unlock(hdev);
372 }
373
374 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
375 {
376 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
377 __u16 setting;
378
379 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
380
381 if (rp->status)
382 return;
383
384 setting = __le16_to_cpu(rp->voice_setting);
385
386 if (hdev->voice_setting == setting)
387 return;
388
389 hdev->voice_setting = setting;
390
391 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
392
393 if (hdev->notify)
394 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
395 }
396
397 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
398 struct sk_buff *skb)
399 {
400 __u8 status = *((__u8 *) skb->data);
401 __u16 setting;
402 void *sent;
403
404 BT_DBG("%s status 0x%2.2x", hdev->name, status);
405
406 if (status)
407 return;
408
409 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
410 if (!sent)
411 return;
412
413 setting = get_unaligned_le16(sent);
414
415 if (hdev->voice_setting == setting)
416 return;
417
418 hdev->voice_setting = setting;
419
420 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
421
422 if (hdev->notify)
423 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
424 }
425
426 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
427 {
428 __u8 status = *((__u8 *) skb->data);
429 struct hci_cp_write_ssp_mode *sent;
430
431 BT_DBG("%s status 0x%2.2x", hdev->name, status);
432
433 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
434 if (!sent)
435 return;
436
437 if (!status) {
438 if (sent->mode)
439 hdev->host_features[0] |= LMP_HOST_SSP;
440 else
441 hdev->host_features[0] &= ~LMP_HOST_SSP;
442 }
443
444 if (test_bit(HCI_MGMT, &hdev->dev_flags))
445 mgmt_ssp_enable_complete(hdev, sent->mode, status);
446 else if (!status) {
447 if (sent->mode)
448 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
449 else
450 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
451 }
452 }
453
454 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
455 {
456 struct hci_rp_read_local_version *rp = (void *) skb->data;
457
458 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
459
460 if (rp->status)
461 return;
462
463 hdev->hci_ver = rp->hci_ver;
464 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
465 hdev->lmp_ver = rp->lmp_ver;
466 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
467 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
468
469 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
470 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
471 }
472
473 static void hci_cc_read_local_commands(struct hci_dev *hdev,
474 struct sk_buff *skb)
475 {
476 struct hci_rp_read_local_commands *rp = (void *) skb->data;
477
478 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
479
480 if (!rp->status)
481 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
482 }
483
484 static void hci_cc_read_local_features(struct hci_dev *hdev,
485 struct sk_buff *skb)
486 {
487 struct hci_rp_read_local_features *rp = (void *) skb->data;
488
489 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
490
491 if (rp->status)
492 return;
493
494 memcpy(hdev->features, rp->features, 8);
495
496 /* Adjust default settings according to the features
497 * supported by the device. */
498
499 if (hdev->features[0] & LMP_3SLOT)
500 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
501
502 if (hdev->features[0] & LMP_5SLOT)
503 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
504
505 if (hdev->features[1] & LMP_HV2) {
506 hdev->pkt_type |= (HCI_HV2);
507 hdev->esco_type |= (ESCO_HV2);
508 }
509
510 if (hdev->features[1] & LMP_HV3) {
511 hdev->pkt_type |= (HCI_HV3);
512 hdev->esco_type |= (ESCO_HV3);
513 }
514
515 if (lmp_esco_capable(hdev))
516 hdev->esco_type |= (ESCO_EV3);
517
518 if (hdev->features[4] & LMP_EV4)
519 hdev->esco_type |= (ESCO_EV4);
520
521 if (hdev->features[4] & LMP_EV5)
522 hdev->esco_type |= (ESCO_EV5);
523
524 if (hdev->features[5] & LMP_EDR_ESCO_2M)
525 hdev->esco_type |= (ESCO_2EV3);
526
527 if (hdev->features[5] & LMP_EDR_ESCO_3M)
528 hdev->esco_type |= (ESCO_3EV3);
529
530 if (hdev->features[5] & LMP_EDR_3S_ESCO)
531 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
532
533 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
534 hdev->features[0], hdev->features[1],
535 hdev->features[2], hdev->features[3],
536 hdev->features[4], hdev->features[5],
537 hdev->features[6], hdev->features[7]);
538 }
539
540 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
541 struct sk_buff *skb)
542 {
543 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
544
545 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
546
547 if (rp->status)
548 return;
549
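/* Page 0 carries the standard LMP feature bits, page 1 the host features (e.g. SSP and LE host support). */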
550 switch (rp->page) {
551 case 0:
552 memcpy(hdev->features, rp->features, 8);
553 break;
554 case 1:
555 memcpy(hdev->host_features, rp->features, 8);
556 break;
557 }
558 }
559
560 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
561 struct sk_buff *skb)
562 {
563 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
564
565 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
566
567 if (!rp->status)
568 hdev->flow_ctl_mode = rp->mode;
569 }
570
571 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
572 {
573 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
574
575 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
576
577 if (rp->status)
578 return;
579
580 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
581 hdev->sco_mtu = rp->sco_mtu;
582 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
583 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
584
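/* Controllers flagged with this quirk report unusable SCO buffer values; fall back to a 64-byte MTU and 8 packets. */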
585 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
586 hdev->sco_mtu = 64;
587 hdev->sco_pkts = 8;
588 }
589
590 hdev->acl_cnt = hdev->acl_pkts;
591 hdev->sco_cnt = hdev->sco_pkts;
592
593 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
594 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
595 }
596
597 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
598 {
599 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
600
601 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
602
603 if (!rp->status)
604 bacpy(&hdev->bdaddr, &rp->bdaddr);
605 }
606
607 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
608 struct sk_buff *skb)
609 {
610 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
611
612 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613
614 if (rp->status)
615 return;
616
617 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
618 hdev->block_len = __le16_to_cpu(rp->block_len);
619 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
620
621 hdev->block_cnt = hdev->num_blocks;
622
623 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
624 hdev->block_cnt, hdev->block_len);
625 }
626
627 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
628 struct sk_buff *skb)
629 {
630 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
631
632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633
634 if (rp->status)
635 goto a2mp_rsp;
636
637 hdev->amp_status = rp->amp_status;
638 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
639 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
640 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
641 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
642 hdev->amp_type = rp->amp_type;
643 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
644 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
645 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
646 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
647
648 a2mp_rsp:
649 a2mp_send_getinfo_rsp(hdev);
650 }
651
652 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
653 struct sk_buff *skb)
654 {
655 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
656 struct amp_assoc *assoc = &hdev->loc_assoc;
657 size_t rem_len, frag_len;
658
659 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
660
661 if (rp->status)
662 goto a2mp_rsp;
663
664 frag_len = skb->len - sizeof(*rp);
665 rem_len = __le16_to_cpu(rp->rem_len);
666
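/* The AMP assoc data may span several events: if more remains than this fragment carries, store it and request the next fragment; otherwise copy the final piece and finish. */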
667 if (rem_len > frag_len) {
668 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
669
670 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
671 assoc->offset += frag_len;
672
673 /* Read other fragments */
674 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
675
676 return;
677 }
678
679 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
680 assoc->len = assoc->offset + rem_len;
681 assoc->offset = 0;
682
683 a2mp_rsp:
684 /* Send A2MP Rsp when all fragments are received */
685 a2mp_send_getampassoc_rsp(hdev, rp->status);
686 a2mp_send_create_phy_link_req(hdev, rp->status);
687 }
688
689 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
690 struct sk_buff *skb)
691 {
692 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
693
694 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
695
696 if (!rp->status)
697 hdev->inq_tx_power = rp->tx_power;
698 }
699
700 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
701 {
702 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
703 struct hci_cp_pin_code_reply *cp;
704 struct hci_conn *conn;
705
706 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
707
708 hci_dev_lock(hdev);
709
710 if (test_bit(HCI_MGMT, &hdev->dev_flags))
711 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
712
713 if (rp->status)
714 goto unlock;
715
716 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
717 if (!cp)
718 goto unlock;
719
720 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
721 if (conn)
722 conn->pin_length = cp->pin_len;
723
724 unlock:
725 hci_dev_unlock(hdev);
726 }
727
728 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
729 {
730 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
731
732 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
733
734 hci_dev_lock(hdev);
735
736 if (test_bit(HCI_MGMT, &hdev->dev_flags))
737 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
738 rp->status);
739
740 hci_dev_unlock(hdev);
741 }
742
743 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
744 struct sk_buff *skb)
745 {
746 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
747
748 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
749
750 if (rp->status)
751 return;
752
753 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
754 hdev->le_pkts = rp->le_max_pkt;
755
756 hdev->le_cnt = hdev->le_pkts;
757
758 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
759 }
760
761 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
762 struct sk_buff *skb)
763 {
764 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
765
766 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
767
768 if (!rp->status)
769 memcpy(hdev->le_features, rp->features, 8);
770 }
771
772 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
773 struct sk_buff *skb)
774 {
775 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
776
777 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
778
779 if (!rp->status) {
780 hdev->adv_tx_power = rp->tx_power;
781 if (!test_bit(HCI_INIT, &hdev->flags))
782 hci_update_ad(hdev);
783 }
784 }
785
786 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
787 {
788 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
789
790 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
791
792 hci_dev_lock(hdev);
793
794 if (test_bit(HCI_MGMT, &hdev->dev_flags))
795 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
796 rp->status);
797
798 hci_dev_unlock(hdev);
799 }
800
801 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
802 struct sk_buff *skb)
803 {
804 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
805
806 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
807
808 hci_dev_lock(hdev);
809
810 if (test_bit(HCI_MGMT, &hdev->dev_flags))
811 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
812 ACL_LINK, 0, rp->status);
813
814 hci_dev_unlock(hdev);
815 }
816
817 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
818 {
819 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
820
821 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
822
823 hci_dev_lock(hdev);
824
825 if (test_bit(HCI_MGMT, &hdev->dev_flags))
826 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
827 0, rp->status);
828
829 hci_dev_unlock(hdev);
830 }
831
832 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
833 struct sk_buff *skb)
834 {
835 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
836
837 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
838
839 hci_dev_lock(hdev);
840
841 if (test_bit(HCI_MGMT, &hdev->dev_flags))
842 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
843 ACL_LINK, 0, rp->status);
844
845 hci_dev_unlock(hdev);
846 }
847
848 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
849 struct sk_buff *skb)
850 {
851 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
852
853 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
854
855 hci_dev_lock(hdev);
856 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
857 rp->randomizer, rp->status);
858 hci_dev_unlock(hdev);
859 }
860
861 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
862 {
863 __u8 *sent, status = *((__u8 *) skb->data);
864
865 BT_DBG("%s status 0x%2.2x", hdev->name, status);
866
867 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
868 if (!sent)
869 return;
870
871 hci_dev_lock(hdev);
872
873 if (!status) {
874 if (*sent)
875 set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
876 else
877 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
878 }
879
880 hci_dev_unlock(hdev);
881
882 if (!test_bit(HCI_INIT, &hdev->flags))
883 hci_update_ad(hdev);
884 }
885
886 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
887 {
888 __u8 status = *((__u8 *) skb->data);
889
890 BT_DBG("%s status 0x%2.2x", hdev->name, status);
891
892 if (status) {
893 hci_dev_lock(hdev);
894 mgmt_start_discovery_failed(hdev, status);
895 hci_dev_unlock(hdev);
896 return;
897 }
898 }
899
900 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
901 struct sk_buff *skb)
902 {
903 struct hci_cp_le_set_scan_enable *cp;
904 __u8 status = *((__u8 *) skb->data);
905
906 BT_DBG("%s status 0x%2.2x", hdev->name, status);
907
908 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
909 if (!cp)
910 return;
911
912 switch (cp->enable) {
913 case LE_SCANNING_ENABLED:
914 if (status) {
915 hci_dev_lock(hdev);
916 mgmt_start_discovery_failed(hdev, status);
917 hci_dev_unlock(hdev);
918 return;
919 }
920
921 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
922
923 hci_dev_lock(hdev);
924 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
925 hci_dev_unlock(hdev);
926 break;
927
928 case LE_SCANNING_DISABLED:
929 if (status) {
930 hci_dev_lock(hdev);
931 mgmt_stop_discovery_failed(hdev, status);
932 hci_dev_unlock(hdev);
933 return;
934 }
935
936 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
937
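/* Interleaved discovery follows the LE scan phase with a BR/EDR inquiry; for other discovery types stopping the LE scan ends discovery. */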
938 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
939 hdev->discovery.state == DISCOVERY_FINDING) {
940 mgmt_interleaved_discovery(hdev);
941 } else {
942 hci_dev_lock(hdev);
943 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
944 hci_dev_unlock(hdev);
945 }
946
947 break;
948
949 default:
950 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
951 break;
952 }
953 }
954
955 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
956 struct sk_buff *skb)
957 {
958 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
959
960 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
961
962 if (!rp->status)
963 hdev->le_white_list_size = rp->size;
964 }
965
966 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
967 struct sk_buff *skb)
968 {
969 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
970
971 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
972
973 if (!rp->status)
974 memcpy(hdev->le_states, rp->le_states, 8);
975 }
976
977 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
978 struct sk_buff *skb)
979 {
980 struct hci_cp_write_le_host_supported *sent;
981 __u8 status = *((__u8 *) skb->data);
982
983 BT_DBG("%s status 0x%2.2x", hdev->name, status);
984
985 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
986 if (!sent)
987 return;
988
989 if (!status) {
990 if (sent->le)
991 hdev->host_features[0] |= LMP_HOST_LE;
992 else
993 hdev->host_features[0] &= ~LMP_HOST_LE;
994
995 if (sent->simul)
996 hdev->host_features[0] |= LMP_HOST_LE_BREDR;
997 else
998 hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
999 }
1000
1001 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1002 !test_bit(HCI_INIT, &hdev->flags))
1003 mgmt_le_enable_complete(hdev, sent->le, status);
1004 }
1005
1006 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1007 struct sk_buff *skb)
1008 {
1009 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1010
1011 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1012 hdev->name, rp->status, rp->phy_handle);
1013
1014 if (rp->status)
1015 return;
1016
1017 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1018 }
1019
1020 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1021 {
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1023
1024 if (status) {
1025 hci_conn_check_pending(hdev);
1026 hci_dev_lock(hdev);
1027 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1028 mgmt_start_discovery_failed(hdev, status);
1029 hci_dev_unlock(hdev);
1030 return;
1031 }
1032
1033 set_bit(HCI_INQUIRY, &hdev->flags);
1034
1035 hci_dev_lock(hdev);
1036 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1037 hci_dev_unlock(hdev);
1038 }
1039
1040 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1041 {
1042 struct hci_cp_create_conn *cp;
1043 struct hci_conn *conn;
1044
1045 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1046
1047 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1048 if (!cp)
1049 return;
1050
1051 hci_dev_lock(hdev);
1052
1053 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1054
1055 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1056
1057 if (status) {
1058 if (conn && conn->state == BT_CONNECT) {
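/* Status 0x0c is "Command Disallowed": keep the connection pending (BT_CONNECT2) for a retry on the first attempts, otherwise give up and delete it. */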
1059 if (status != 0x0c || conn->attempt > 2) {
1060 conn->state = BT_CLOSED;
1061 hci_proto_connect_cfm(conn, status);
1062 hci_conn_del(conn);
1063 } else
1064 conn->state = BT_CONNECT2;
1065 }
1066 } else {
1067 if (!conn) {
1068 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1069 if (conn) {
1070 conn->out = true;
1071 conn->link_mode |= HCI_LM_MASTER;
1072 } else
1073 BT_ERR("No memory for new connection");
1074 }
1075 }
1076
1077 hci_dev_unlock(hdev);
1078 }
1079
1080 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1081 {
1082 struct hci_cp_add_sco *cp;
1083 struct hci_conn *acl, *sco;
1084 __u16 handle;
1085
1086 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1087
1088 if (!status)
1089 return;
1090
1091 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1092 if (!cp)
1093 return;
1094
1095 handle = __le16_to_cpu(cp->handle);
1096
1097 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1098
1099 hci_dev_lock(hdev);
1100
1101 acl = hci_conn_hash_lookup_handle(hdev, handle);
1102 if (acl) {
1103 sco = acl->link;
1104 if (sco) {
1105 sco->state = BT_CLOSED;
1106
1107 hci_proto_connect_cfm(sco, status);
1108 hci_conn_del(sco);
1109 }
1110 }
1111
1112 hci_dev_unlock(hdev);
1113 }
1114
1115 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1116 {
1117 struct hci_cp_auth_requested *cp;
1118 struct hci_conn *conn;
1119
1120 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1121
1122 if (!status)
1123 return;
1124
1125 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1126 if (!cp)
1127 return;
1128
1129 hci_dev_lock(hdev);
1130
1131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1132 if (conn) {
1133 if (conn->state == BT_CONFIG) {
1134 hci_proto_connect_cfm(conn, status);
1135 hci_conn_put(conn);
1136 }
1137 }
1138
1139 hci_dev_unlock(hdev);
1140 }
1141
1142 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1143 {
1144 struct hci_cp_set_conn_encrypt *cp;
1145 struct hci_conn *conn;
1146
1147 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1148
1149 if (!status)
1150 return;
1151
1152 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1153 if (!cp)
1154 return;
1155
1156 hci_dev_lock(hdev);
1157
1158 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1159 if (conn) {
1160 if (conn->state == BT_CONFIG) {
1161 hci_proto_connect_cfm(conn, status);
1162 hci_conn_put(conn);
1163 }
1164 }
1165
1166 hci_dev_unlock(hdev);
1167 }
1168
1169 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1170 struct hci_conn *conn)
1171 {
1172 if (conn->state != BT_CONFIG || !conn->out)
1173 return 0;
1174
1175 if (conn->pending_sec_level == BT_SECURITY_SDP)
1176 return 0;
1177
1178 /* Only request authentication for SSP connections or non-SSP
1179 * devices with sec_level HIGH or if MITM protection is requested */
1180 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1181 conn->pending_sec_level != BT_SECURITY_HIGH)
1182 return 0;
1183
1184 return 1;
1185 }
1186
1187 static int hci_resolve_name(struct hci_dev *hdev,
1188 struct inquiry_entry *e)
1189 {
1190 struct hci_cp_remote_name_req cp;
1191
1192 memset(&cp, 0, sizeof(cp));
1193
1194 bacpy(&cp.bdaddr, &e->data.bdaddr);
1195 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1196 cp.pscan_mode = e->data.pscan_mode;
1197 cp.clock_offset = e->data.clock_offset;
1198
1199 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1200 }
1201
1202 static bool hci_resolve_next_name(struct hci_dev *hdev)
1203 {
1204 struct discovery_state *discov = &hdev->discovery;
1205 struct inquiry_entry *e;
1206
1207 if (list_empty(&discov->resolve))
1208 return false;
1209
1210 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1211 if (!e)
1212 return false;
1213
1214 if (hci_resolve_name(hdev, e) == 0) {
1215 e->name_state = NAME_PENDING;
1216 return true;
1217 }
1218
1219 return false;
1220 }
1221
1222 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1223 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1224 {
1225 struct discovery_state *discov = &hdev->discovery;
1226 struct inquiry_entry *e;
1227
1228 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1229 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1230 name_len, conn->dev_class);
1231
1232 if (discov->state == DISCOVERY_STOPPED)
1233 return;
1234
1235 if (discov->state == DISCOVERY_STOPPING)
1236 goto discov_complete;
1237
1238 if (discov->state != DISCOVERY_RESOLVING)
1239 return;
1240
1241 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1242 /* If the device was not found in the list of devices whose names are
1243 * pending resolution, there is no need to resolve the next name here;
1244 * that will be done when another Remote Name Request Complete event
1245 * arrives. */
1246 if (!e)
1247 return;
1248
1249 list_del(&e->list);
1250 if (name) {
1251 e->name_state = NAME_KNOWN;
1252 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1253 e->data.rssi, name, name_len);
1254 } else {
1255 e->name_state = NAME_NOT_KNOWN;
1256 }
1257
1258 if (hci_resolve_next_name(hdev))
1259 return;
1260
1261 discov_complete:
1262 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1263 }
1264
1265 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1266 {
1267 struct hci_cp_remote_name_req *cp;
1268 struct hci_conn *conn;
1269
1270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1271
1272 /* If successful, wait for the remote name request complete event
1273 * before checking whether authentication is needed */
1274 if (!status)
1275 return;
1276
1277 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1278 if (!cp)
1279 return;
1280
1281 hci_dev_lock(hdev);
1282
1283 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1284
1285 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1286 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1287
1288 if (!conn)
1289 goto unlock;
1290
1291 if (!hci_outgoing_auth_needed(hdev, conn))
1292 goto unlock;
1293
1294 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1295 struct hci_cp_auth_requested cp;
1296 cp.handle = __cpu_to_le16(conn->handle);
1297 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1298 }
1299
1300 unlock:
1301 hci_dev_unlock(hdev);
1302 }
1303
1304 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1305 {
1306 struct hci_cp_read_remote_features *cp;
1307 struct hci_conn *conn;
1308
1309 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1310
1311 if (!status)
1312 return;
1313
1314 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1315 if (!cp)
1316 return;
1317
1318 hci_dev_lock(hdev);
1319
1320 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1321 if (conn) {
1322 if (conn->state == BT_CONFIG) {
1323 hci_proto_connect_cfm(conn, status);
1324 hci_conn_put(conn);
1325 }
1326 }
1327
1328 hci_dev_unlock(hdev);
1329 }
1330
1331 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1332 {
1333 struct hci_cp_read_remote_ext_features *cp;
1334 struct hci_conn *conn;
1335
1336 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1337
1338 if (!status)
1339 return;
1340
1341 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1342 if (!cp)
1343 return;
1344
1345 hci_dev_lock(hdev);
1346
1347 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1348 if (conn) {
1349 if (conn->state == BT_CONFIG) {
1350 hci_proto_connect_cfm(conn, status);
1351 hci_conn_put(conn);
1352 }
1353 }
1354
1355 hci_dev_unlock(hdev);
1356 }
1357
1358 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1359 {
1360 struct hci_cp_setup_sync_conn *cp;
1361 struct hci_conn *acl, *sco;
1362 __u16 handle;
1363
1364 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1365
1366 if (!status)
1367 return;
1368
1369 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1370 if (!cp)
1371 return;
1372
1373 handle = __le16_to_cpu(cp->handle);
1374
1375 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1376
1377 hci_dev_lock(hdev);
1378
1379 acl = hci_conn_hash_lookup_handle(hdev, handle);
1380 if (acl) {
1381 sco = acl->link;
1382 if (sco) {
1383 sco->state = BT_CLOSED;
1384
1385 hci_proto_connect_cfm(sco, status);
1386 hci_conn_del(sco);
1387 }
1388 }
1389
1390 hci_dev_unlock(hdev);
1391 }
1392
1393 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1394 {
1395 struct hci_cp_sniff_mode *cp;
1396 struct hci_conn *conn;
1397
1398 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1399
1400 if (!status)
1401 return;
1402
1403 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1404 if (!cp)
1405 return;
1406
1407 hci_dev_lock(hdev);
1408
1409 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1410 if (conn) {
1411 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1412
1413 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1414 hci_sco_setup(conn, status);
1415 }
1416
1417 hci_dev_unlock(hdev);
1418 }
1419
1420 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1421 {
1422 struct hci_cp_exit_sniff_mode *cp;
1423 struct hci_conn *conn;
1424
1425 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1426
1427 if (!status)
1428 return;
1429
1430 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1431 if (!cp)
1432 return;
1433
1434 hci_dev_lock(hdev);
1435
1436 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1437 if (conn) {
1438 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1439
1440 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1441 hci_sco_setup(conn, status);
1442 }
1443
1444 hci_dev_unlock(hdev);
1445 }
1446
1447 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1448 {
1449 struct hci_cp_disconnect *cp;
1450 struct hci_conn *conn;
1451
1452 if (!status)
1453 return;
1454
1455 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1456 if (!cp)
1457 return;
1458
1459 hci_dev_lock(hdev);
1460
1461 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1462 if (conn)
1463 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1464 conn->dst_type, status);
1465
1466 hci_dev_unlock(hdev);
1467 }
1468
1469 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1470 {
1471 struct hci_conn *conn;
1472
1473 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1474
1475 if (status) {
1476 hci_dev_lock(hdev);
1477
1478 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1479 if (!conn) {
1480 hci_dev_unlock(hdev);
1481 return;
1482 }
1483
1484 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1485
1486 conn->state = BT_CLOSED;
1487 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1488 conn->dst_type, status);
1489 hci_proto_connect_cfm(conn, status);
1490 hci_conn_del(conn);
1491
1492 hci_dev_unlock(hdev);
1493 }
1494 }
1495
1496 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1497 {
1498 struct hci_cp_create_phy_link *cp;
1499
1500 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1501
1502 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1503 if (!cp)
1504 return;
1505
1506 hci_dev_lock(hdev);
1507
1508 if (status) {
1509 struct hci_conn *hcon;
1510
1511 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1512 if (hcon)
1513 hci_conn_del(hcon);
1514 } else {
1515 amp_write_remote_assoc(hdev, cp->phy_handle);
1516 }
1517
1518 hci_dev_unlock(hdev);
1519 }
1520
1521 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1522 {
1523 struct hci_cp_accept_phy_link *cp;
1524
1525 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1526
1527 if (status)
1528 return;
1529
1530 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1531 if (!cp)
1532 return;
1533
1534 amp_write_remote_assoc(hdev, cp->phy_handle);
1535 }
1536
1537 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1538 {
1539 __u8 status = *((__u8 *) skb->data);
1540 struct discovery_state *discov = &hdev->discovery;
1541 struct inquiry_entry *e;
1542
1543 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1544
1545 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
1546
1547 hci_conn_check_pending(hdev);
1548
1549 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1550 return;
1551
1552 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1553 return;
1554
1555 hci_dev_lock(hdev);
1556
1557 if (discov->state != DISCOVERY_FINDING)
1558 goto unlock;
1559
1560 if (list_empty(&discov->resolve)) {
1561 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1562 goto unlock;
1563 }
1564
1565 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1566 if (e && hci_resolve_name(hdev, e) == 0) {
1567 e->name_state = NAME_PENDING;
1568 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1569 } else {
1570 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1571 }
1572
1573 unlock:
1574 hci_dev_unlock(hdev);
1575 }
1576
1577 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1578 {
1579 struct inquiry_data data;
1580 struct inquiry_info *info = (void *) (skb->data + 1);
1581 int num_rsp = *((__u8 *) skb->data);
1582
1583 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1584
1585 if (!num_rsp)
1586 return;
1587
1588 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1589 return;
1590
1591 hci_dev_lock(hdev);
1592
1593 for (; num_rsp; num_rsp--, info++) {
1594 bool name_known, ssp;
1595
1596 bacpy(&data.bdaddr, &info->bdaddr);
1597 data.pscan_rep_mode = info->pscan_rep_mode;
1598 data.pscan_period_mode = info->pscan_period_mode;
1599 data.pscan_mode = info->pscan_mode;
1600 memcpy(data.dev_class, info->dev_class, 3);
1601 data.clock_offset = info->clock_offset;
1602 data.rssi = 0x00;
1603 data.ssp_mode = 0x00;
1604
1605 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1606 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1607 info->dev_class, 0, !name_known, ssp, NULL,
1608 0);
1609 }
1610
1611 hci_dev_unlock(hdev);
1612 }
1613
1614 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1615 {
1616 struct hci_ev_conn_complete *ev = (void *) skb->data;
1617 struct hci_conn *conn;
1618
1619 BT_DBG("%s", hdev->name);
1620
1621 hci_dev_lock(hdev);
1622
1623 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1624 if (!conn) {
1625 if (ev->link_type != SCO_LINK)
1626 goto unlock;
1627
1628 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1629 if (!conn)
1630 goto unlock;
1631
1632 conn->type = SCO_LINK;
1633 }
1634
1635 if (!ev->status) {
1636 conn->handle = __le16_to_cpu(ev->handle);
1637
1638 if (conn->type == ACL_LINK) {
1639 conn->state = BT_CONFIG;
1640 hci_conn_hold(conn);
1641
1642 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1643 !hci_find_link_key(hdev, &ev->bdaddr))
1644 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1645 else
1646 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1647 } else
1648 conn->state = BT_CONNECTED;
1649
1650 hci_conn_hold_device(conn);
1651 hci_conn_add_sysfs(conn);
1652
1653 if (test_bit(HCI_AUTH, &hdev->flags))
1654 conn->link_mode |= HCI_LM_AUTH;
1655
1656 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1657 conn->link_mode |= HCI_LM_ENCRYPT;
1658
1659 /* Get remote features */
1660 if (conn->type == ACL_LINK) {
1661 struct hci_cp_read_remote_features cp;
1662 cp.handle = ev->handle;
1663 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1664 sizeof(cp), &cp);
1665 }
1666
1667 /* Set packet type for incoming connection */
1668 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1669 struct hci_cp_change_conn_ptype cp;
1670 cp.handle = ev->handle;
1671 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1672 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1673 &cp);
1674 }
1675 } else {
1676 conn->state = BT_CLOSED;
1677 if (conn->type == ACL_LINK)
1678 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1679 conn->dst_type, ev->status);
1680 }
1681
1682 if (conn->type == ACL_LINK)
1683 hci_sco_setup(conn, ev->status);
1684
1685 if (ev->status) {
1686 hci_proto_connect_cfm(conn, ev->status);
1687 hci_conn_del(conn);
1688 } else if (ev->link_type != ACL_LINK)
1689 hci_proto_connect_cfm(conn, ev->status);
1690
1691 unlock:
1692 hci_dev_unlock(hdev);
1693
1694 hci_conn_check_pending(hdev);
1695 }
1696
1697 void hci_conn_accept(struct hci_conn *conn, int mask)
1698 {
1699 struct hci_dev *hdev = conn->hdev;
1700
1701 BT_DBG("conn %p", conn);
1702
1703 conn->state = BT_CONFIG;
1704
1705 if (!lmp_esco_capable(hdev)) {
1706 struct hci_cp_accept_conn_req cp;
1707
1708 bacpy(&cp.bdaddr, &conn->dst);
1709
1710 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1711 cp.role = 0x00; /* Become master */
1712 else
1713 cp.role = 0x01; /* Remain slave */
1714
1715 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
1716 } else /* lmp_esco_capable(hdev) */ {
1717 struct hci_cp_accept_sync_conn_req cp;
1718
1719 bacpy(&cp.bdaddr, &conn->dst);
1720 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1721
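/* 0x1f40 (8000 bytes/s) is the standard 64 kbit/s voice bandwidth; max_latency 0xffff and retrans_effort 0xff mean "don't care". */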
1722 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1723 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1724 cp.max_latency = __constant_cpu_to_le16(0xffff);
1725 cp.content_format = cpu_to_le16(hdev->voice_setting);
1726 cp.retrans_effort = 0xff;
1727
1728 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1729 sizeof(cp), &cp);
1730 }
1731 }
1732
1733 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1734 {
1735 struct hci_ev_conn_request *ev = (void *) skb->data;
1736 int mask = hdev->link_mode;
1737 __u8 flags = 0;
1738
1739 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1740 ev->link_type);
1741
1742 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1743 &flags);
1744
1745 if ((mask & HCI_LM_ACCEPT) &&
1746 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1747 /* Connection accepted */
1748 struct inquiry_entry *ie;
1749 struct hci_conn *conn;
1750
1751 hci_dev_lock(hdev);
1752
1753 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1754 if (ie)
1755 memcpy(ie->data.dev_class, ev->dev_class, 3);
1756
1757 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1758 &ev->bdaddr);
1759 if (!conn) {
1760 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1761 if (!conn) {
1762 BT_ERR("No memory for new connection");
1763 hci_dev_unlock(hdev);
1764 return;
1765 }
1766 }
1767
1768 memcpy(conn->dev_class, ev->dev_class, 3);
1769
1770 hci_dev_unlock(hdev);
1771
1772 if (ev->link_type == ACL_LINK ||
1773 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1774 struct hci_cp_accept_conn_req cp;
1775 conn->state = BT_CONNECT;
1776
1777 bacpy(&cp.bdaddr, &ev->bdaddr);
1778
1779 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1780 cp.role = 0x00; /* Become master */
1781 else
1782 cp.role = 0x01; /* Remain slave */
1783
1784 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1785 &cp);
1786 } else if (!(flags & HCI_PROTO_DEFER)) {
1787 struct hci_cp_accept_sync_conn_req cp;
1788 conn->state = BT_CONNECT;
1789
1790 bacpy(&cp.bdaddr, &ev->bdaddr);
1791 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1792
1793 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1794 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1795 cp.max_latency = __constant_cpu_to_le16(0xffff);
1796 cp.content_format = cpu_to_le16(hdev->voice_setting);
1797 cp.retrans_effort = 0xff;
1798
1799 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1800 sizeof(cp), &cp);
1801 } else {
1802 conn->state = BT_CONNECT2;
1803 hci_proto_connect_cfm(conn, 0);
1804 hci_conn_put(conn);
1805 }
1806 } else {
1807 /* Connection rejected */
1808 struct hci_cp_reject_conn_req cp;
1809
1810 bacpy(&cp.bdaddr, &ev->bdaddr);
1811 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1812 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1813 }
1814 }
1815
1816 static u8 hci_to_mgmt_reason(u8 err)
1817 {
1818 switch (err) {
1819 case HCI_ERROR_CONNECTION_TIMEOUT:
1820 return MGMT_DEV_DISCONN_TIMEOUT;
1821 case HCI_ERROR_REMOTE_USER_TERM:
1822 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1823 case HCI_ERROR_REMOTE_POWER_OFF:
1824 return MGMT_DEV_DISCONN_REMOTE;
1825 case HCI_ERROR_LOCAL_HOST_TERM:
1826 return MGMT_DEV_DISCONN_LOCAL_HOST;
1827 default:
1828 return MGMT_DEV_DISCONN_UNKNOWN;
1829 }
1830 }
1831
1832 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1833 {
1834 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1835 struct hci_conn *conn;
1836
1837 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1838
1839 hci_dev_lock(hdev);
1840
1841 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1842 if (!conn)
1843 goto unlock;
1844
1845 if (ev->status == 0)
1846 conn->state = BT_CLOSED;
1847
1848 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1849 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1850 if (ev->status) {
1851 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1852 conn->dst_type, ev->status);
1853 } else {
1854 u8 reason = hci_to_mgmt_reason(ev->reason);
1855
1856 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1857 conn->dst_type, reason);
1858 }
1859 }
1860
1861 if (ev->status == 0) {
1862 if (conn->type == ACL_LINK && conn->flush_key)
1863 hci_remove_link_key(hdev, &conn->dst);
1864 hci_proto_disconn_cfm(conn, ev->reason);
1865 hci_conn_del(conn);
1866 }
1867
1868 unlock:
1869 hci_dev_unlock(hdev);
1870 }
1871
1872 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1873 {
1874 struct hci_ev_auth_complete *ev = (void *) skb->data;
1875 struct hci_conn *conn;
1876
1877 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1878
1879 hci_dev_lock(hdev);
1880
1881 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1882 if (!conn)
1883 goto unlock;
1884
1885 if (!ev->status) {
1886 if (!hci_conn_ssp_enabled(conn) &&
1887 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1888 BT_INFO("re-auth of legacy device is not possible.");
1889 } else {
1890 conn->link_mode |= HCI_LM_AUTH;
1891 conn->sec_level = conn->pending_sec_level;
1892 }
1893 } else {
1894 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1895 ev->status);
1896 }
1897
1898 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1899 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1900
1901 if (conn->state == BT_CONFIG) {
1902 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1903 struct hci_cp_set_conn_encrypt cp;
1904 cp.handle = ev->handle;
1905 cp.encrypt = 0x01;
1906 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1907 &cp);
1908 } else {
1909 conn->state = BT_CONNECTED;
1910 hci_proto_connect_cfm(conn, ev->status);
1911 hci_conn_put(conn);
1912 }
1913 } else {
1914 hci_auth_cfm(conn, ev->status);
1915
1916 hci_conn_hold(conn);
1917 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1918 hci_conn_put(conn);
1919 }
1920
1921 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1922 if (!ev->status) {
1923 struct hci_cp_set_conn_encrypt cp;
1924 cp.handle = ev->handle;
1925 cp.encrypt = 0x01;
1926 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1927 &cp);
1928 } else {
1929 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1930 hci_encrypt_cfm(conn, ev->status, 0x00);
1931 }
1932 }
1933
1934 unlock:
1935 hci_dev_unlock(hdev);
1936 }
1937
1938 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1939 {
1940 struct hci_ev_remote_name *ev = (void *) skb->data;
1941 struct hci_conn *conn;
1942
1943 BT_DBG("%s", hdev->name);
1944
1945 hci_conn_check_pending(hdev);
1946
1947 hci_dev_lock(hdev);
1948
1949 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1950
1951 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1952 goto check_auth;
1953
1954 if (ev->status == 0)
1955 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1956 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1957 else
1958 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1959
1960 check_auth:
1961 if (!conn)
1962 goto unlock;
1963
1964 if (!hci_outgoing_auth_needed(hdev, conn))
1965 goto unlock;
1966
1967 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1968 struct hci_cp_auth_requested cp;
1969 cp.handle = __cpu_to_le16(conn->handle);
1970 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1971 }
1972
1973 unlock:
1974 hci_dev_unlock(hdev);
1975 }
1976
1977 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1978 {
1979 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1980 struct hci_conn *conn;
1981
1982 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1983
1984 hci_dev_lock(hdev);
1985
1986 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1987 if (conn) {
1988 if (!ev->status) {
1989 if (ev->encrypt) {
1990 /* Encryption implies authentication */
1991 conn->link_mode |= HCI_LM_AUTH;
1992 conn->link_mode |= HCI_LM_ENCRYPT;
1993 conn->sec_level = conn->pending_sec_level;
1994 } else
1995 conn->link_mode &= ~HCI_LM_ENCRYPT;
1996 }
1997
1998 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1999
2000 if (ev->status && conn->state == BT_CONNECTED) {
2001 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2002 hci_conn_put(conn);
2003 goto unlock;
2004 }
2005
2006 if (conn->state == BT_CONFIG) {
2007 if (!ev->status)
2008 conn->state = BT_CONNECTED;
2009
2010 hci_proto_connect_cfm(conn, ev->status);
2011 hci_conn_put(conn);
2012 } else
2013 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2014 }
2015
2016 unlock:
2017 hci_dev_unlock(hdev);
2018 }
2019
2020 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2021 struct sk_buff *skb)
2022 {
2023 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2024 struct hci_conn *conn;
2025
2026 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2027
2028 hci_dev_lock(hdev);
2029
2030 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2031 if (conn) {
2032 if (!ev->status)
2033 conn->link_mode |= HCI_LM_SECURE;
2034
2035 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2036
2037 hci_key_change_cfm(conn, ev->status);
2038 }
2039
2040 hci_dev_unlock(hdev);
2041 }
2042
2043 static void hci_remote_features_evt(struct hci_dev *hdev,
2044 struct sk_buff *skb)
2045 {
2046 struct hci_ev_remote_features *ev = (void *) skb->data;
2047 struct hci_conn *conn;
2048
2049 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2050
2051 hci_dev_lock(hdev);
2052
2053 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2054 if (!conn)
2055 goto unlock;
2056
2057 if (!ev->status)
2058 memcpy(conn->features, ev->features, 8);
2059
2060 if (conn->state != BT_CONFIG)
2061 goto unlock;
2062
2063 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2064 struct hci_cp_read_remote_ext_features cp;
2065 cp.handle = ev->handle;
2066 cp.page = 0x01;
2067 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2068 sizeof(cp), &cp);
2069 goto unlock;
2070 }
2071
2072 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2073 struct hci_cp_remote_name_req cp;
2074 memset(&cp, 0, sizeof(cp));
2075 bacpy(&cp.bdaddr, &conn->dst);
2076 cp.pscan_rep_mode = 0x02;
2077 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2078 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2079 mgmt_device_connected(hdev, &conn->dst, conn->type,
2080 conn->dst_type, 0, NULL, 0,
2081 conn->dev_class);
2082
2083 if (!hci_outgoing_auth_needed(hdev, conn)) {
2084 conn->state = BT_CONNECTED;
2085 hci_proto_connect_cfm(conn, ev->status);
2086 hci_conn_put(conn);
2087 }
2088
2089 unlock:
2090 hci_dev_unlock(hdev);
2091 }
2092
2093 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2094 {
2095 struct hci_ev_cmd_complete *ev = (void *) skb->data;
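/* For most commands the first return parameter, immediately after the Command Complete header, is the status byte. */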
2096 u8 status = skb->data[sizeof(*ev)];
2097 __u16 opcode;
2098
2099 skb_pull(skb, sizeof(*ev));
2100
2101 opcode = __le16_to_cpu(ev->opcode);
2102
2103 switch (opcode) {
2104 case HCI_OP_INQUIRY_CANCEL:
2105 hci_cc_inquiry_cancel(hdev, skb);
2106 break;
2107
2108 case HCI_OP_PERIODIC_INQ:
2109 hci_cc_periodic_inq(hdev, skb);
2110 break;
2111
2112 case HCI_OP_EXIT_PERIODIC_INQ:
2113 hci_cc_exit_periodic_inq(hdev, skb);
2114 break;
2115
2116 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2117 hci_cc_remote_name_req_cancel(hdev, skb);
2118 break;
2119
2120 case HCI_OP_ROLE_DISCOVERY:
2121 hci_cc_role_discovery(hdev, skb);
2122 break;
2123
2124 case HCI_OP_READ_LINK_POLICY:
2125 hci_cc_read_link_policy(hdev, skb);
2126 break;
2127
2128 case HCI_OP_WRITE_LINK_POLICY:
2129 hci_cc_write_link_policy(hdev, skb);
2130 break;
2131
2132 case HCI_OP_READ_DEF_LINK_POLICY:
2133 hci_cc_read_def_link_policy(hdev, skb);
2134 break;
2135
2136 case HCI_OP_WRITE_DEF_LINK_POLICY:
2137 hci_cc_write_def_link_policy(hdev, skb);
2138 break;
2139
2140 case HCI_OP_RESET:
2141 hci_cc_reset(hdev, skb);
2142 break;
2143
2144 case HCI_OP_WRITE_LOCAL_NAME:
2145 hci_cc_write_local_name(hdev, skb);
2146 break;
2147
2148 case HCI_OP_READ_LOCAL_NAME:
2149 hci_cc_read_local_name(hdev, skb);
2150 break;
2151
2152 case HCI_OP_WRITE_AUTH_ENABLE:
2153 hci_cc_write_auth_enable(hdev, skb);
2154 break;
2155
2156 case HCI_OP_WRITE_ENCRYPT_MODE:
2157 hci_cc_write_encrypt_mode(hdev, skb);
2158 break;
2159
2160 case HCI_OP_WRITE_SCAN_ENABLE:
2161 hci_cc_write_scan_enable(hdev, skb);
2162 break;
2163
2164 case HCI_OP_READ_CLASS_OF_DEV:
2165 hci_cc_read_class_of_dev(hdev, skb);
2166 break;
2167
2168 case HCI_OP_WRITE_CLASS_OF_DEV:
2169 hci_cc_write_class_of_dev(hdev, skb);
2170 break;
2171
2172 case HCI_OP_READ_VOICE_SETTING:
2173 hci_cc_read_voice_setting(hdev, skb);
2174 break;
2175
2176 case HCI_OP_WRITE_VOICE_SETTING:
2177 hci_cc_write_voice_setting(hdev, skb);
2178 break;
2179
2180 case HCI_OP_WRITE_SSP_MODE:
2181 hci_cc_write_ssp_mode(hdev, skb);
2182 break;
2183
2184 case HCI_OP_READ_LOCAL_VERSION:
2185 hci_cc_read_local_version(hdev, skb);
2186 break;
2187
2188 case HCI_OP_READ_LOCAL_COMMANDS:
2189 hci_cc_read_local_commands(hdev, skb);
2190 break;
2191
2192 case HCI_OP_READ_LOCAL_FEATURES:
2193 hci_cc_read_local_features(hdev, skb);
2194 break;
2195
2196 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2197 hci_cc_read_local_ext_features(hdev, skb);
2198 break;
2199
2200 case HCI_OP_READ_BUFFER_SIZE:
2201 hci_cc_read_buffer_size(hdev, skb);
2202 break;
2203
2204 case HCI_OP_READ_BD_ADDR:
2205 hci_cc_read_bd_addr(hdev, skb);
2206 break;
2207
2208 case HCI_OP_READ_DATA_BLOCK_SIZE:
2209 hci_cc_read_data_block_size(hdev, skb);
2210 break;
2211
2212 case HCI_OP_READ_FLOW_CONTROL_MODE:
2213 hci_cc_read_flow_control_mode(hdev, skb);
2214 break;
2215
2216 case HCI_OP_READ_LOCAL_AMP_INFO:
2217 hci_cc_read_local_amp_info(hdev, skb);
2218 break;
2219
2220 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2221 hci_cc_read_local_amp_assoc(hdev, skb);
2222 break;
2223
2224 case HCI_OP_READ_INQ_RSP_TX_POWER:
2225 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2226 break;
2227
2228 case HCI_OP_PIN_CODE_REPLY:
2229 hci_cc_pin_code_reply(hdev, skb);
2230 break;
2231
2232 case HCI_OP_PIN_CODE_NEG_REPLY:
2233 hci_cc_pin_code_neg_reply(hdev, skb);
2234 break;
2235
2236 case HCI_OP_READ_LOCAL_OOB_DATA:
2237 hci_cc_read_local_oob_data_reply(hdev, skb);
2238 break;
2239
2240 case HCI_OP_LE_READ_BUFFER_SIZE:
2241 hci_cc_le_read_buffer_size(hdev, skb);
2242 break;
2243
2244 case HCI_OP_LE_READ_LOCAL_FEATURES:
2245 hci_cc_le_read_local_features(hdev, skb);
2246 break;
2247
2248 case HCI_OP_LE_READ_ADV_TX_POWER:
2249 hci_cc_le_read_adv_tx_power(hdev, skb);
2250 break;
2251
2252 case HCI_OP_USER_CONFIRM_REPLY:
2253 hci_cc_user_confirm_reply(hdev, skb);
2254 break;
2255
2256 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2257 hci_cc_user_confirm_neg_reply(hdev, skb);
2258 break;
2259
2260 case HCI_OP_USER_PASSKEY_REPLY:
2261 hci_cc_user_passkey_reply(hdev, skb);
2262 break;
2263
2264 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2265 hci_cc_user_passkey_neg_reply(hdev, skb);
2266 break;
2267
2268 case HCI_OP_LE_SET_SCAN_PARAM:
2269 hci_cc_le_set_scan_param(hdev, skb);
2270 break;
2271
2272 case HCI_OP_LE_SET_ADV_ENABLE:
2273 hci_cc_le_set_adv_enable(hdev, skb);
2274 break;
2275
2276 case HCI_OP_LE_SET_SCAN_ENABLE:
2277 hci_cc_le_set_scan_enable(hdev, skb);
2278 break;
2279
2280 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2281 hci_cc_le_read_white_list_size(hdev, skb);
2282 break;
2283
2284 case HCI_OP_LE_READ_SUPPORTED_STATES:
2285 hci_cc_le_read_supported_states(hdev, skb);
2286 break;
2287
2288 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2289 hci_cc_write_le_host_supported(hdev, skb);
2290 break;
2291
2292 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2293 hci_cc_write_remote_amp_assoc(hdev, skb);
2294 break;
2295
2296 default:
2297 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2298 break;
2299 }
2300
2301 if (opcode != HCI_OP_NOP)
2302 del_timer(&hdev->cmd_timer);
2303
2304 hci_req_cmd_complete(hdev, opcode, status);
2305
2306 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2307 atomic_set(&hdev->cmd_cnt, 1);
2308 if (!skb_queue_empty(&hdev->cmd_q))
2309 queue_work(hdev->workqueue, &hdev->cmd_work);
2310 }
2311 }
2312
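/* Dispatch a Command Status event to the matching hci_cs_* handler based on
 * the opcode, then refresh the command timer and re-arm command sending
 * when the controller has credits available. */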
2313 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2314 {
2315 struct hci_ev_cmd_status *ev = (void *) skb->data;
2316 __u16 opcode;
2317
2318 skb_pull(skb, sizeof(*ev));
2319
2320 opcode = __le16_to_cpu(ev->opcode);
2321
2322 switch (opcode) {
2323 case HCI_OP_INQUIRY:
2324 hci_cs_inquiry(hdev, ev->status);
2325 break;
2326
2327 case HCI_OP_CREATE_CONN:
2328 hci_cs_create_conn(hdev, ev->status);
2329 break;
2330
2331 case HCI_OP_ADD_SCO:
2332 hci_cs_add_sco(hdev, ev->status);
2333 break;
2334
2335 case HCI_OP_AUTH_REQUESTED:
2336 hci_cs_auth_requested(hdev, ev->status);
2337 break;
2338
2339 case HCI_OP_SET_CONN_ENCRYPT:
2340 hci_cs_set_conn_encrypt(hdev, ev->status);
2341 break;
2342
2343 case HCI_OP_REMOTE_NAME_REQ:
2344 hci_cs_remote_name_req(hdev, ev->status);
2345 break;
2346
2347 case HCI_OP_READ_REMOTE_FEATURES:
2348 hci_cs_read_remote_features(hdev, ev->status);
2349 break;
2350
2351 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2352 hci_cs_read_remote_ext_features(hdev, ev->status);
2353 break;
2354
2355 case HCI_OP_SETUP_SYNC_CONN:
2356 hci_cs_setup_sync_conn(hdev, ev->status);
2357 break;
2358
2359 case HCI_OP_SNIFF_MODE:
2360 hci_cs_sniff_mode(hdev, ev->status);
2361 break;
2362
2363 case HCI_OP_EXIT_SNIFF_MODE:
2364 hci_cs_exit_sniff_mode(hdev, ev->status);
2365 break;
2366
2367 case HCI_OP_DISCONNECT:
2368 hci_cs_disconnect(hdev, ev->status);
2369 break;
2370
2371 case HCI_OP_LE_CREATE_CONN:
2372 hci_cs_le_create_conn(hdev, ev->status);
2373 break;
2374
2375 case HCI_OP_CREATE_PHY_LINK:
2376 hci_cs_create_phylink(hdev, ev->status);
2377 break;
2378
2379 case HCI_OP_ACCEPT_PHY_LINK:
2380 hci_cs_accept_phylink(hdev, ev->status);
2381 break;
2382
2383 default:
2384 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2385 break;
2386 }
2387
2388 if (opcode != HCI_OP_NOP)
2389 del_timer(&hdev->cmd_timer);
2390
2391 hci_req_cmd_status(hdev, opcode, ev->status);
2392
2393 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2394 atomic_set(&hdev->cmd_cnt, 1);
2395 if (!skb_queue_empty(&hdev->cmd_q))
2396 queue_work(hdev->workqueue, &hdev->cmd_work);
2397 }
2398 }
2399
2400 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2401 {
2402 struct hci_ev_role_change *ev = (void *) skb->data;
2403 struct hci_conn *conn;
2404
2405 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2406
2407 hci_dev_lock(hdev);
2408
2409 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2410 if (conn) {
2411 if (!ev->status) {
2412 if (ev->role)
2413 conn->link_mode &= ~HCI_LM_MASTER;
2414 else
2415 conn->link_mode |= HCI_LM_MASTER;
2416 }
2417
2418 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2419
2420 hci_role_switch_cfm(conn, ev->status, ev->role);
2421 }
2422
2423 hci_dev_unlock(hdev);
2424 }
2425
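/* Packet-based flow control: for each reported connection handle, credit
 * back the completed packets (capped at the controller's packet limits)
 * and schedule the TX work queue. */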
2426 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2427 {
2428 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2429 int i;
2430
2431 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2432 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2433 return;
2434 }
2435
2436 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2437 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2438 BT_DBG("%s bad parameters", hdev->name);
2439 return;
2440 }
2441
2442 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2443
2444 for (i = 0; i < ev->num_hndl; i++) {
2445 struct hci_comp_pkts_info *info = &ev->handles[i];
2446 struct hci_conn *conn;
2447 __u16 handle, count;
2448
2449 handle = __le16_to_cpu(info->handle);
2450 count = __le16_to_cpu(info->count);
2451
2452 conn = hci_conn_hash_lookup_handle(hdev, handle);
2453 if (!conn)
2454 continue;
2455
2456 conn->sent -= count;
2457
2458 switch (conn->type) {
2459 case ACL_LINK:
2460 hdev->acl_cnt += count;
2461 if (hdev->acl_cnt > hdev->acl_pkts)
2462 hdev->acl_cnt = hdev->acl_pkts;
2463 break;
2464
2465 case LE_LINK:
2466 if (hdev->le_pkts) {
2467 hdev->le_cnt += count;
2468 if (hdev->le_cnt > hdev->le_pkts)
2469 hdev->le_cnt = hdev->le_pkts;
2470 } else {
2471 hdev->acl_cnt += count;
2472 if (hdev->acl_cnt > hdev->acl_pkts)
2473 hdev->acl_cnt = hdev->acl_pkts;
2474 }
2475 break;
2476
2477 case SCO_LINK:
2478 hdev->sco_cnt += count;
2479 if (hdev->sco_cnt > hdev->sco_pkts)
2480 hdev->sco_cnt = hdev->sco_pkts;
2481 break;
2482
2483 default:
2484 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2485 break;
2486 }
2487 }
2488
2489 queue_work(hdev->workqueue, &hdev->tx_work);
2490 }
2491
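/* Resolve a handle to its hci_conn: on BR/EDR controllers the handle
 * addresses the connection directly, on AMP controllers it addresses a
 * logical channel whose parent connection is returned. */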
2492 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2493 __u16 handle)
2494 {
2495 struct hci_chan *chan;
2496
2497 switch (hdev->dev_type) {
2498 case HCI_BREDR:
2499 return hci_conn_hash_lookup_handle(hdev, handle);
2500 case HCI_AMP:
2501 chan = hci_chan_lookup_handle(hdev, handle);
2502 if (chan)
2503 return chan->conn;
2504 break;
2505 default:
2506 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2507 break;
2508 }
2509
2510 return NULL;
2511 }
2512
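/* Block-based flow control: credit back the completed data blocks reported
 * for each handle (capped at the controller's block count) and schedule the
 * TX work queue. */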
2513 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2514 {
2515 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2516 int i;
2517
2518 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2519 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2520 return;
2521 }
2522
2523 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2524 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2525 BT_DBG("%s bad parameters", hdev->name);
2526 return;
2527 }
2528
2529 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2530 ev->num_hndl);
2531
2532 for (i = 0; i < ev->num_hndl; i++) {
2533 struct hci_comp_blocks_info *info = &ev->handles[i];
2534 struct hci_conn *conn = NULL;
2535 __u16 handle, block_count;
2536
2537 handle = __le16_to_cpu(info->handle);
2538 block_count = __le16_to_cpu(info->blocks);
2539
2540 conn = __hci_conn_lookup_handle(hdev, handle);
2541 if (!conn)
2542 continue;
2543
2544 conn->sent -= block_count;
2545
2546 switch (conn->type) {
2547 case ACL_LINK:
2548 case AMP_LINK:
2549 hdev->block_cnt += block_count;
2550 if (hdev->block_cnt > hdev->num_blocks)
2551 hdev->block_cnt = hdev->num_blocks;
2552 break;
2553
2554 default:
2555 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2556 break;
2557 }
2558 }
2559
2560 queue_work(hdev->workqueue, &hdev->tx_work);
2561 }
2562
2563 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2564 {
2565 struct hci_ev_mode_change *ev = (void *) skb->data;
2566 struct hci_conn *conn;
2567
2568 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2569
2570 hci_dev_lock(hdev);
2571
2572 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2573 if (conn) {
2574 conn->mode = ev->mode;
2575 conn->interval = __le16_to_cpu(ev->interval);
2576
2577 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2578 &conn->flags)) {
2579 if (conn->mode == HCI_CM_ACTIVE)
2580 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2581 else
2582 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2583 }
2584
2585 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2586 hci_sco_setup(conn, ev->status);
2587 }
2588
2589 hci_dev_unlock(hdev);
2590 }
2591
2592 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2593 {
2594 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2595 struct hci_conn *conn;
2596
2597 BT_DBG("%s", hdev->name);
2598
2599 hci_dev_lock(hdev);
2600
2601 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2602 if (!conn)
2603 goto unlock;
2604
2605 if (conn->state == BT_CONNECTED) {
2606 hci_conn_hold(conn);
2607 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2608 hci_conn_put(conn);
2609 }
2610
2611 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2612 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2613 sizeof(ev->bdaddr), &ev->bdaddr);
2614 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2615 u8 secure;
2616
2617 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2618 secure = 1;
2619 else
2620 secure = 0;
2621
2622 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2623 }
2624
2625 unlock:
2626 hci_dev_unlock(hdev);
2627 }
2628
2629 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2630 {
2631 struct hci_ev_link_key_req *ev = (void *) skb->data;
2632 struct hci_cp_link_key_reply cp;
2633 struct hci_conn *conn;
2634 struct link_key *key;
2635
2636 BT_DBG("%s", hdev->name);
2637
2638 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2639 return;
2640
2641 hci_dev_lock(hdev);
2642
2643 key = hci_find_link_key(hdev, &ev->bdaddr);
2644 if (!key) {
2645 BT_DBG("%s link key not found for %pMR", hdev->name,
2646 &ev->bdaddr);
2647 goto not_found;
2648 }
2649
2650 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2651 &ev->bdaddr);
2652
2653 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2654 key->type == HCI_LK_DEBUG_COMBINATION) {
2655 BT_DBG("%s ignoring debug key", hdev->name);
2656 goto not_found;
2657 }
2658
2659 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2660 if (conn) {
2661 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2662 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2663 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2664 goto not_found;
2665 }
2666
2667 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2668 conn->pending_sec_level == BT_SECURITY_HIGH) {
2669 BT_DBG("%s ignoring key unauthenticated for high security",
2670 hdev->name);
2671 goto not_found;
2672 }
2673
2674 conn->key_type = key->type;
2675 conn->pin_length = key->pin_len;
2676 }
2677
2678 bacpy(&cp.bdaddr, &ev->bdaddr);
2679 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2680
2681 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2682
2683 hci_dev_unlock(hdev);
2684
2685 return;
2686
2687 not_found:
2688 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2689 hci_dev_unlock(hdev);
2690 }
2691
2692 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2693 {
2694 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2695 struct hci_conn *conn;
2696 u8 pin_len = 0;
2697
2698 BT_DBG("%s", hdev->name);
2699
2700 hci_dev_lock(hdev);
2701
2702 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2703 if (conn) {
2704 hci_conn_hold(conn);
2705 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2706 pin_len = conn->pin_length;
2707
2708 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2709 conn->key_type = ev->key_type;
2710
2711 hci_conn_put(conn);
2712 }
2713
2714 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2715 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2716 ev->key_type, pin_len);
2717
2718 hci_dev_unlock(hdev);
2719 }
2720
2721 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2722 {
2723 struct hci_ev_clock_offset *ev = (void *) skb->data;
2724 struct hci_conn *conn;
2725
2726 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2727
2728 hci_dev_lock(hdev);
2729
2730 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2731 if (conn && !ev->status) {
2732 struct inquiry_entry *ie;
2733
2734 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2735 if (ie) {
2736 ie->data.clock_offset = ev->clock_offset;
2737 ie->timestamp = jiffies;
2738 }
2739 }
2740
2741 hci_dev_unlock(hdev);
2742 }
2743
2744 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2745 {
2746 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2747 struct hci_conn *conn;
2748
2749 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2750
2751 hci_dev_lock(hdev);
2752
2753 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2754 if (conn && !ev->status)
2755 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2756
2757 hci_dev_unlock(hdev);
2758 }
2759
2760 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2761 {
2762 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2763 struct inquiry_entry *ie;
2764
2765 BT_DBG("%s", hdev->name);
2766
2767 hci_dev_lock(hdev);
2768
2769 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2770 if (ie) {
2771 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2772 ie->timestamp = jiffies;
2773 }
2774
2775 hci_dev_unlock(hdev);
2776 }
2777
2778 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2779 struct sk_buff *skb)
2780 {
2781 struct inquiry_data data;
2782 int num_rsp = *((__u8 *) skb->data);
2783 bool name_known, ssp;
2784
2785 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2786
2787 if (!num_rsp)
2788 return;
2789
2790 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2791 return;
2792
2793 hci_dev_lock(hdev);
2794
2795 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2796 struct inquiry_info_with_rssi_and_pscan_mode *info;
2797 info = (void *) (skb->data + 1);
2798
2799 for (; num_rsp; num_rsp--, info++) {
2800 bacpy(&data.bdaddr, &info->bdaddr);
2801 data.pscan_rep_mode = info->pscan_rep_mode;
2802 data.pscan_period_mode = info->pscan_period_mode;
2803 data.pscan_mode = info->pscan_mode;
2804 memcpy(data.dev_class, info->dev_class, 3);
2805 data.clock_offset = info->clock_offset;
2806 data.rssi = info->rssi;
2807 data.ssp_mode = 0x00;
2808
2809 name_known = hci_inquiry_cache_update(hdev, &data,
2810 false, &ssp);
2811 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2812 info->dev_class, info->rssi,
2813 !name_known, ssp, NULL, 0);
2814 }
2815 } else {
2816 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2817
2818 for (; num_rsp; num_rsp--, info++) {
2819 bacpy(&data.bdaddr, &info->bdaddr);
2820 data.pscan_rep_mode = info->pscan_rep_mode;
2821 data.pscan_period_mode = info->pscan_period_mode;
2822 data.pscan_mode = 0x00;
2823 memcpy(data.dev_class, info->dev_class, 3);
2824 data.clock_offset = info->clock_offset;
2825 data.rssi = info->rssi;
2826 data.ssp_mode = 0x00;
2827 name_known = hci_inquiry_cache_update(hdev, &data,
2828 false, &ssp);
2829 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2830 info->dev_class, info->rssi,
2831 !name_known, ssp, NULL, 0);
2832 }
2833 }
2834
2835 hci_dev_unlock(hdev);
2836 }
2837
2838 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2839 struct sk_buff *skb)
2840 {
2841 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2842 struct hci_conn *conn;
2843
2844 BT_DBG("%s", hdev->name);
2845
2846 hci_dev_lock(hdev);
2847
2848 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2849 if (!conn)
2850 goto unlock;
2851
2852 if (!ev->status && ev->page == 0x01) {
2853 struct inquiry_entry *ie;
2854
2855 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2856 if (ie)
2857 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2858
2859 if (ev->features[0] & LMP_HOST_SSP)
2860 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2861 }
2862
2863 if (conn->state != BT_CONFIG)
2864 goto unlock;
2865
2866 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2867 struct hci_cp_remote_name_req cp;
2868 memset(&cp, 0, sizeof(cp));
2869 bacpy(&cp.bdaddr, &conn->dst);
2870 cp.pscan_rep_mode = 0x02;
2871 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2872 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2873 mgmt_device_connected(hdev, &conn->dst, conn->type,
2874 conn->dst_type, 0, NULL, 0,
2875 conn->dev_class);
2876
2877 if (!hci_outgoing_auth_needed(hdev, conn)) {
2878 conn->state = BT_CONNECTED;
2879 hci_proto_connect_cfm(conn, ev->status);
2880 hci_conn_put(conn);
2881 }
2882
2883 unlock:
2884 hci_dev_unlock(hdev);
2885 }
2886
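/* Result of a SCO/eSCO setup: on success the link moves to BT_CONNECTED;
 * on a handful of known reject codes an outgoing setup is retried once with
 * a fallback packet type; anything else closes the connection. */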
2887 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2888 struct sk_buff *skb)
2889 {
2890 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2891 struct hci_conn *conn;
2892
2893 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2894
2895 hci_dev_lock(hdev);
2896
2897 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2898 if (!conn) {
2899 if (ev->link_type == ESCO_LINK)
2900 goto unlock;
2901
2902 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2903 if (!conn)
2904 goto unlock;
2905
2906 conn->type = SCO_LINK;
2907 }
2908
2909 switch (ev->status) {
2910 case 0x00:
2911 conn->handle = __le16_to_cpu(ev->handle);
2912 conn->state = BT_CONNECTED;
2913
2914 hci_conn_hold_device(conn);
2915 hci_conn_add_sysfs(conn);
2916 break;
2917
2918 case 0x11: /* Unsupported Feature or Parameter Value */
2919 case 0x1c: /* SCO interval rejected */
2920 case 0x1a: /* Unsupported Remote Feature */
2921 case 0x1f: /* Unspecified error */
2922 if (conn->out && conn->attempt < 2) {
2923 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2924 (hdev->esco_type & EDR_ESCO_MASK);
2925 hci_setup_sync(conn, conn->link->handle);
2926 goto unlock;
2927 }
2928 /* fall through */
2929
2930 default:
2931 conn->state = BT_CLOSED;
2932 break;
2933 }
2934
2935 hci_proto_connect_cfm(conn, ev->status);
2936 if (ev->status)
2937 hci_conn_del(conn);
2938
2939 unlock:
2940 hci_dev_unlock(hdev);
2941 }
2942
2943 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2944 struct sk_buff *skb)
2945 {
2946 struct inquiry_data data;
2947 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2948 int num_rsp = *((__u8 *) skb->data);
2949 size_t eir_len;
2950
2951 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2952
2953 if (!num_rsp)
2954 return;
2955
2956 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2957 return;
2958
2959 hci_dev_lock(hdev);
2960
2961 for (; num_rsp; num_rsp--, info++) {
2962 bool name_known, ssp;
2963
2964 bacpy(&data.bdaddr, &info->bdaddr);
2965 data.pscan_rep_mode = info->pscan_rep_mode;
2966 data.pscan_period_mode = info->pscan_period_mode;
2967 data.pscan_mode = 0x00;
2968 memcpy(data.dev_class, info->dev_class, 3);
2969 data.clock_offset = info->clock_offset;
2970 data.rssi = info->rssi;
2971 data.ssp_mode = 0x01;
2972
2973 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2974 name_known = eir_has_data_type(info->data,
2975 sizeof(info->data),
2976 EIR_NAME_COMPLETE);
2977 else
2978 name_known = true;
2979
2980 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2981 &ssp);
2982 eir_len = eir_get_length(info->data, sizeof(info->data));
2983 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2984 info->dev_class, info->rssi, !name_known,
2985 ssp, info->data, eir_len);
2986 }
2987
2988 hci_dev_unlock(hdev);
2989 }
2990
2991 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
2992 struct sk_buff *skb)
2993 {
2994 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
2995 struct hci_conn *conn;
2996
2997 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
2998 __le16_to_cpu(ev->handle));
2999
3000 hci_dev_lock(hdev);
3001
3002 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3003 if (!conn)
3004 goto unlock;
3005
3006 if (!ev->status)
3007 conn->sec_level = conn->pending_sec_level;
3008
3009 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3010
3011 if (ev->status && conn->state == BT_CONNECTED) {
3012 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3013 hci_conn_put(conn);
3014 goto unlock;
3015 }
3016
3017 if (conn->state == BT_CONFIG) {
3018 if (!ev->status)
3019 conn->state = BT_CONNECTED;
3020
3021 hci_proto_connect_cfm(conn, ev->status);
3022 hci_conn_put(conn);
3023 } else {
3024 hci_auth_cfm(conn, ev->status);
3025
3026 hci_conn_hold(conn);
3027 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3028 hci_conn_put(conn);
3029 }
3030
3031 unlock:
3032 hci_dev_unlock(hdev);
3033 }
3034
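/* Work out the authentication requirements value to send in the IO
 * Capability reply, honouring the remote side's bonding preference. */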
3035 static u8 hci_get_auth_req(struct hci_conn *conn)
3036 {
3037 /* If the remote requests dedicated bonding, follow that lead */
3038 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3039 /* If both the remote and local IO capabilities allow MITM
3040 * protection, require it; otherwise don't */
3041 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3042 return 0x02;
3043 else
3044 return 0x03;
3045 }
3046
3047 /* If the remote requests no-bonding, follow that lead */
3048 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3049 return conn->remote_auth | (conn->auth_type & 0x01);
3050
3051 return conn->auth_type;
3052 }
3053
3054 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3055 {
3056 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3057 struct hci_conn *conn;
3058
3059 BT_DBG("%s", hdev->name);
3060
3061 hci_dev_lock(hdev);
3062
3063 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3064 if (!conn)
3065 goto unlock;
3066
3067 hci_conn_hold(conn);
3068
3069 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3070 goto unlock;
3071
3072 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3073 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3074 struct hci_cp_io_capability_reply cp;
3075
3076 bacpy(&cp.bdaddr, &ev->bdaddr);
3077 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3078 * since KeyboardDisplay is not a valid value for this reply in the BT spec. */
3079 cp.capability = (conn->io_capability == 0x04) ?
3080 0x01 : conn->io_capability;
3081 conn->auth_type = hci_get_auth_req(conn);
3082 cp.authentication = conn->auth_type;
3083
3084 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3085 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3086 cp.oob_data = 0x01;
3087 else
3088 cp.oob_data = 0x00;
3089
3090 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3091 sizeof(cp), &cp);
3092 } else {
3093 struct hci_cp_io_capability_neg_reply cp;
3094
3095 bacpy(&cp.bdaddr, &ev->bdaddr);
3096 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3097
3098 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3099 sizeof(cp), &cp);
3100 }
3101
3102 unlock:
3103 hci_dev_unlock(hdev);
3104 }
3105
3106 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3107 {
3108 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3109 struct hci_conn *conn;
3110
3111 BT_DBG("%s", hdev->name);
3112
3113 hci_dev_lock(hdev);
3114
3115 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3116 if (!conn)
3117 goto unlock;
3118
3119 conn->remote_cap = ev->capability;
3120 conn->remote_auth = ev->authentication;
3121 if (ev->oob_data)
3122 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3123
3124 unlock:
3125 hci_dev_unlock(hdev);
3126 }
3127
3128 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3129 struct sk_buff *skb)
3130 {
3131 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3132 int loc_mitm, rem_mitm, confirm_hint = 0;
3133 struct hci_conn *conn;
3134
3135 BT_DBG("%s", hdev->name);
3136
3137 hci_dev_lock(hdev);
3138
3139 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3140 goto unlock;
3141
3142 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3143 if (!conn)
3144 goto unlock;
3145
3146 loc_mitm = (conn->auth_type & 0x01);
3147 rem_mitm = (conn->remote_auth & 0x01);
3148
3149 /* If we require MITM but the remote device can't provide that
3150 * (it has NoInputNoOutput), reject the confirmation
3151 * request. The only exception is when we're the dedicated bonding
3152 * initiator (connect_cfm_cb set), since then we always have the MITM
3153 * bit set. */
3154 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3155 BT_DBG("Rejecting request: remote device can't provide MITM");
3156 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3157 sizeof(ev->bdaddr), &ev->bdaddr);
3158 goto unlock;
3159 }
3160
3161 /* If neither side requires MITM protection, auto-accept */
3162 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3163 (!rem_mitm || conn->io_capability == 0x03)) {
3164
3165 /* If we're not the initiator, request authorization to
3166 * proceed from user space (mgmt_user_confirm with
3167 * confirm_hint set to 1). */
3168 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3169 BT_DBG("Confirming auto-accept as acceptor");
3170 confirm_hint = 1;
3171 goto confirm;
3172 }
3173
3174 BT_DBG("Auto-accept of user confirmation with %ums delay",
3175 hdev->auto_accept_delay);
3176
3177 if (hdev->auto_accept_delay > 0) {
3178 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3179 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3180 goto unlock;
3181 }
3182
3183 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3184 sizeof(ev->bdaddr), &ev->bdaddr);
3185 goto unlock;
3186 }
3187
3188 confirm:
3189 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3190 confirm_hint);
3191
3192 unlock:
3193 hci_dev_unlock(hdev);
3194 }
3195
3196 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3197 struct sk_buff *skb)
3198 {
3199 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3200
3201 BT_DBG("%s", hdev->name);
3202
3203 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3204 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3205 }
3206
3207 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3208 struct sk_buff *skb)
3209 {
3210 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3211 struct hci_conn *conn;
3212
3213 BT_DBG("%s", hdev->name);
3214
3215 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3216 if (!conn)
3217 return;
3218
3219 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3220 conn->passkey_entered = 0;
3221
3222 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3223 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3224 conn->dst_type, conn->passkey_notify,
3225 conn->passkey_entered);
3226 }
3227
3228 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3229 {
3230 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3231 struct hci_conn *conn;
3232
3233 BT_DBG("%s", hdev->name);
3234
3235 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3236 if (!conn)
3237 return;
3238
3239 switch (ev->type) {
3240 case HCI_KEYPRESS_STARTED:
3241 conn->passkey_entered = 0;
3242 return;
3243
3244 case HCI_KEYPRESS_ENTERED:
3245 conn->passkey_entered++;
3246 break;
3247
3248 case HCI_KEYPRESS_ERASED:
3249 conn->passkey_entered--;
3250 break;
3251
3252 case HCI_KEYPRESS_CLEARED:
3253 conn->passkey_entered = 0;
3254 break;
3255
3256 case HCI_KEYPRESS_COMPLETED:
3257 return;
3258 }
3259
3260 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3261 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3262 conn->dst_type, conn->passkey_notify,
3263 conn->passkey_entered);
3264 }
3265
3266 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3267 struct sk_buff *skb)
3268 {
3269 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3270 struct hci_conn *conn;
3271
3272 BT_DBG("%s", hdev->name);
3273
3274 hci_dev_lock(hdev);
3275
3276 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3277 if (!conn)
3278 goto unlock;
3279
3280 /* To avoid sending duplicate auth_failed events to user space, we
3281 * check the HCI_CONN_AUTH_PEND flag, which is set if we
3282 * initiated the authentication. A traditional auth_complete
3283 * event is always produced for the initiator and is also mapped
3284 * to the mgmt_auth_failed event */
3285 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3286 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3287 ev->status);
3288
3289 hci_conn_put(conn);
3290
3291 unlock:
3292 hci_dev_unlock(hdev);
3293 }
3294
3295 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3296 struct sk_buff *skb)
3297 {
3298 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3299 struct inquiry_entry *ie;
3300
3301 BT_DBG("%s", hdev->name);
3302
3303 hci_dev_lock(hdev);
3304
3305 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3306 if (ie)
3307 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3308
3309 hci_dev_unlock(hdev);
3310 }
3311
3312 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3313 struct sk_buff *skb)
3314 {
3315 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3316 struct oob_data *data;
3317
3318 BT_DBG("%s", hdev->name);
3319
3320 hci_dev_lock(hdev);
3321
3322 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3323 goto unlock;
3324
3325 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3326 if (data) {
3327 struct hci_cp_remote_oob_data_reply cp;
3328
3329 bacpy(&cp.bdaddr, &ev->bdaddr);
3330 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3331 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3332
3333 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3334 &cp);
3335 } else {
3336 struct hci_cp_remote_oob_data_neg_reply cp;
3337
3338 bacpy(&cp.bdaddr, &ev->bdaddr);
3339 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3340 &cp);
3341 }
3342
3343 unlock:
3344 hci_dev_unlock(hdev);
3345 }
3346
3347 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3348 struct sk_buff *skb)
3349 {
3350 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3351 struct hci_conn *hcon, *bredr_hcon;
3352
3353 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3354 ev->status);
3355
3356 hci_dev_lock(hdev);
3357
3358 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3359 if (!hcon) {
3360 hci_dev_unlock(hdev);
3361 return;
3362 }
3363
3364 if (ev->status) {
3365 hci_conn_del(hcon);
3366 hci_dev_unlock(hdev);
3367 return;
3368 }
3369
3370 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3371
3372 hcon->state = BT_CONNECTED;
3373 bacpy(&hcon->dst, &bredr_hcon->dst);
3374
3375 hci_conn_hold(hcon);
3376 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3377 hci_conn_put(hcon);
3378
3379 hci_conn_hold_device(hcon);
3380 hci_conn_add_sysfs(hcon);
3381
3382 amp_physical_cfm(bredr_hcon, hcon);
3383
3384 hci_dev_unlock(hdev);
3385 }
3386
3387 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3388 {
3389 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3390 struct hci_conn *hcon;
3391 struct hci_chan *hchan;
3392 struct amp_mgr *mgr;
3393
3394 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3395 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3396 ev->status);
3397
3398 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3399 if (!hcon)
3400 return;
3401
3402 /* Create AMP hchan */
3403 hchan = hci_chan_create(hcon);
3404 if (!hchan)
3405 return;
3406
3407 hchan->handle = le16_to_cpu(ev->handle);
3408
3409 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3410
3411 mgr = hcon->amp_mgr;
3412 if (mgr && mgr->bredr_chan) {
3413 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3414
3415 l2cap_chan_lock(bredr_chan);
3416
3417 bredr_chan->conn->mtu = hdev->block_mtu;
3418 l2cap_logical_cfm(bredr_chan, hchan, 0);
3419 hci_conn_hold(hcon);
3420
3421 l2cap_chan_unlock(bredr_chan);
3422 }
3423 }
3424
3425 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3426 struct sk_buff *skb)
3427 {
3428 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3429 struct hci_chan *hchan;
3430
3431 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3432 le16_to_cpu(ev->handle), ev->status);
3433
3434 if (ev->status)
3435 return;
3436
3437 hci_dev_lock(hdev);
3438
3439 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3440 if (!hchan)
3441 goto unlock;
3442
3443 amp_destroy_logical_link(hchan, ev->reason);
3444
3445 unlock:
3446 hci_dev_unlock(hdev);
3447 }
3448
3449 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3450 struct sk_buff *skb)
3451 {
3452 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3453 struct hci_conn *hcon;
3454
3455 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3456
3457 if (ev->status)
3458 return;
3459
3460 hci_dev_lock(hdev);
3461
3462 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3463 if (hcon) {
3464 hcon->state = BT_CLOSED;
3465 hci_conn_del(hcon);
3466 }
3467
3468 hci_dev_unlock(hdev);
3469 }
3470
3471 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3472 {
3473 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3474 struct hci_conn *conn;
3475
3476 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3477
3478 hci_dev_lock(hdev);
3479
3480 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3481 if (!conn) {
3482 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3483 if (!conn) {
3484 BT_ERR("No memory for new connection");
3485 goto unlock;
3486 }
3487
3488 conn->dst_type = ev->bdaddr_type;
3489
3490 if (ev->role == LE_CONN_ROLE_MASTER) {
3491 conn->out = true;
3492 conn->link_mode |= HCI_LM_MASTER;
3493 }
3494 }
3495
3496 if (ev->status) {
3497 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3498 conn->dst_type, ev->status);
3499 hci_proto_connect_cfm(conn, ev->status);
3500 conn->state = BT_CLOSED;
3501 hci_conn_del(conn);
3502 goto unlock;
3503 }
3504
3505 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3506 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3507 conn->dst_type, 0, NULL, 0, NULL);
3508
3509 conn->sec_level = BT_SECURITY_LOW;
3510 conn->handle = __le16_to_cpu(ev->handle);
3511 conn->state = BT_CONNECTED;
3512
3513 hci_conn_hold_device(conn);
3514 hci_conn_add_sysfs(conn);
3515
3516 hci_proto_connect_cfm(conn, ev->status);
3517
3518 unlock:
3519 hci_dev_unlock(hdev);
3520 }
3521
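/* Walk the advertising reports in the event and forward each one to the
 * management interface as a found device. The RSSI byte follows the
 * variable-length advertising data. */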
3522 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3523 {
3524 u8 num_reports = skb->data[0];
3525 void *ptr = &skb->data[1];
3526 s8 rssi;
3527
3528 while (num_reports--) {
3529 struct hci_ev_le_advertising_info *ev = ptr;
3530
3531 rssi = ev->data[ev->length];
3532 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3533 NULL, rssi, 0, 1, ev->data, ev->length);
3534
3535 ptr += sizeof(*ev) + ev->length + 1;
3536 }
3537 }
3538
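/* The controller asks for the Long Term Key of an LE connection: look the
 * key up by EDiv/Rand and reply with it, or send a negative reply if no key
 * or no matching connection is found. A key of STK type is removed after
 * being handed to the controller. */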
3539 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3540 {
3541 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3542 struct hci_cp_le_ltk_reply cp;
3543 struct hci_cp_le_ltk_neg_reply neg;
3544 struct hci_conn *conn;
3545 struct smp_ltk *ltk;
3546
3547 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3548
3549 hci_dev_lock(hdev);
3550
3551 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3552 if (conn == NULL)
3553 goto not_found;
3554
3555 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3556 if (ltk == NULL)
3557 goto not_found;
3558
3559 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3560 cp.handle = cpu_to_le16(conn->handle);
3561
3562 if (ltk->authenticated)
3563 conn->sec_level = BT_SECURITY_HIGH;
3564
3565 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3566
3567 if (ltk->type & HCI_SMP_STK) {
3568 list_del(&ltk->list);
3569 kfree(ltk);
3570 }
3571
3572 hci_dev_unlock(hdev);
3573
3574 return;
3575
3576 not_found:
3577 neg.handle = ev->handle;
3578 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3579 hci_dev_unlock(hdev);
3580 }
3581
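/* Demultiplex LE Meta events to the subevent-specific handlers. */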
3582 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3583 {
3584 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3585
3586 skb_pull(skb, sizeof(*le_ev));
3587
3588 switch (le_ev->subevent) {
3589 case HCI_EV_LE_CONN_COMPLETE:
3590 hci_le_conn_complete_evt(hdev, skb);
3591 break;
3592
3593 case HCI_EV_LE_ADVERTISING_REPORT:
3594 hci_le_adv_report_evt(hdev, skb);
3595 break;
3596
3597 case HCI_EV_LE_LTK_REQ:
3598 hci_le_ltk_request_evt(hdev, skb);
3599 break;
3600
3601 default:
3602 break;
3603 }
3604 }
3605
3606 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3607 {
3608 struct hci_ev_channel_selected *ev = (void *) skb->data;
3609 struct hci_conn *hcon;
3610
3611 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3612
3613 skb_pull(skb, sizeof(*ev));
3614
3615 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3616 if (!hcon)
3617 return;
3618
3619 amp_read_loc_assoc_final_data(hdev, hcon);
3620 }
3621
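/* Main entry point for incoming HCI event packets: strip the event header
 * and dispatch to the per-event handler. */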
3622 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3623 {
3624 struct hci_event_hdr *hdr = (void *) skb->data;
3625 __u8 event = hdr->evt;
3626
3627 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3628
3629 switch (event) {
3630 case HCI_EV_INQUIRY_COMPLETE:
3631 hci_inquiry_complete_evt(hdev, skb);
3632 break;
3633
3634 case HCI_EV_INQUIRY_RESULT:
3635 hci_inquiry_result_evt(hdev, skb);
3636 break;
3637
3638 case HCI_EV_CONN_COMPLETE:
3639 hci_conn_complete_evt(hdev, skb);
3640 break;
3641
3642 case HCI_EV_CONN_REQUEST:
3643 hci_conn_request_evt(hdev, skb);
3644 break;
3645
3646 case HCI_EV_DISCONN_COMPLETE:
3647 hci_disconn_complete_evt(hdev, skb);
3648 break;
3649
3650 case HCI_EV_AUTH_COMPLETE:
3651 hci_auth_complete_evt(hdev, skb);
3652 break;
3653
3654 case HCI_EV_REMOTE_NAME:
3655 hci_remote_name_evt(hdev, skb);
3656 break;
3657
3658 case HCI_EV_ENCRYPT_CHANGE:
3659 hci_encrypt_change_evt(hdev, skb);
3660 break;
3661
3662 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3663 hci_change_link_key_complete_evt(hdev, skb);
3664 break;
3665
3666 case HCI_EV_REMOTE_FEATURES:
3667 hci_remote_features_evt(hdev, skb);
3668 break;
3669
3670 case HCI_EV_CMD_COMPLETE:
3671 hci_cmd_complete_evt(hdev, skb);
3672 break;
3673
3674 case HCI_EV_CMD_STATUS:
3675 hci_cmd_status_evt(hdev, skb);
3676 break;
3677
3678 case HCI_EV_ROLE_CHANGE:
3679 hci_role_change_evt(hdev, skb);
3680 break;
3681
3682 case HCI_EV_NUM_COMP_PKTS:
3683 hci_num_comp_pkts_evt(hdev, skb);
3684 break;
3685
3686 case HCI_EV_MODE_CHANGE:
3687 hci_mode_change_evt(hdev, skb);
3688 break;
3689
3690 case HCI_EV_PIN_CODE_REQ:
3691 hci_pin_code_request_evt(hdev, skb);
3692 break;
3693
3694 case HCI_EV_LINK_KEY_REQ:
3695 hci_link_key_request_evt(hdev, skb);
3696 break;
3697
3698 case HCI_EV_LINK_KEY_NOTIFY:
3699 hci_link_key_notify_evt(hdev, skb);
3700 break;
3701
3702 case HCI_EV_CLOCK_OFFSET:
3703 hci_clock_offset_evt(hdev, skb);
3704 break;
3705
3706 case HCI_EV_PKT_TYPE_CHANGE:
3707 hci_pkt_type_change_evt(hdev, skb);
3708 break;
3709
3710 case HCI_EV_PSCAN_REP_MODE:
3711 hci_pscan_rep_mode_evt(hdev, skb);
3712 break;
3713
3714 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3715 hci_inquiry_result_with_rssi_evt(hdev, skb);
3716 break;
3717
3718 case HCI_EV_REMOTE_EXT_FEATURES:
3719 hci_remote_ext_features_evt(hdev, skb);
3720 break;
3721
3722 case HCI_EV_SYNC_CONN_COMPLETE:
3723 hci_sync_conn_complete_evt(hdev, skb);
3724 break;
3725
3726 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3727 hci_extended_inquiry_result_evt(hdev, skb);
3728 break;
3729
3730 case HCI_EV_KEY_REFRESH_COMPLETE:
3731 hci_key_refresh_complete_evt(hdev, skb);
3732 break;
3733
3734 case HCI_EV_IO_CAPA_REQUEST:
3735 hci_io_capa_request_evt(hdev, skb);
3736 break;
3737
3738 case HCI_EV_IO_CAPA_REPLY:
3739 hci_io_capa_reply_evt(hdev, skb);
3740 break;
3741
3742 case HCI_EV_USER_CONFIRM_REQUEST:
3743 hci_user_confirm_request_evt(hdev, skb);
3744 break;
3745
3746 case HCI_EV_USER_PASSKEY_REQUEST:
3747 hci_user_passkey_request_evt(hdev, skb);
3748 break;
3749
3750 case HCI_EV_USER_PASSKEY_NOTIFY:
3751 hci_user_passkey_notify_evt(hdev, skb);
3752 break;
3753
3754 case HCI_EV_KEYPRESS_NOTIFY:
3755 hci_keypress_notify_evt(hdev, skb);
3756 break;
3757
3758 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3759 hci_simple_pair_complete_evt(hdev, skb);
3760 break;
3761
3762 case HCI_EV_REMOTE_HOST_FEATURES:
3763 hci_remote_host_features_evt(hdev, skb);
3764 break;
3765
3766 case HCI_EV_LE_META:
3767 hci_le_meta_evt(hdev, skb);
3768 break;
3769
3770 case HCI_EV_CHANNEL_SELECTED:
3771 hci_chan_selected_evt(hdev, skb);
3772 break;
3773
3774 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3775 hci_remote_oob_data_request_evt(hdev, skb);
3776 break;
3777
3778 case HCI_EV_PHY_LINK_COMPLETE:
3779 hci_phy_link_complete_evt(hdev, skb);
3780 break;
3781
3782 case HCI_EV_LOGICAL_LINK_COMPLETE:
3783 hci_loglink_complete_evt(hdev, skb);
3784 break;
3785
3786 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3787 hci_disconn_loglink_complete_evt(hdev, skb);
3788 break;
3789
3790 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3791 hci_disconn_phylink_complete_evt(hdev, skb);
3792 break;
3793
3794 case HCI_EV_NUM_COMP_BLOCKS:
3795 hci_num_comp_blocks_evt(hdev, skb);
3796 break;
3797
3798 default:
3799 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3800 break;
3801 }
3802
3803 kfree_skb(skb);
3804 hdev->stat.evt_rx++;
3805 }