Bluetooth: Fix stand-alone HCI command handling
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_event.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32#include <net/bluetooth/a2mp.h>
33#include <net/bluetooth/amp.h>
34
35/* Handle HCI Event packets */
36
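/* Command Complete handlers (hci_cc_*). Each one receives the skb
 * carrying the return parameters of the command it corresponds to;
 * for most commands the first byte is the status code. */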
37static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
38{
39 __u8 status = *((__u8 *) skb->data);
40
41 BT_DBG("%s status 0x%2.2x", hdev->name, status);
42
43 if (status) {
44 hci_dev_lock(hdev);
45 mgmt_stop_discovery_failed(hdev, status);
46 hci_dev_unlock(hdev);
47 return;
48 }
49
50 clear_bit(HCI_INQUIRY, &hdev->flags);
51
52 hci_dev_lock(hdev);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54 hci_dev_unlock(hdev);
55
56 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
57
58 hci_conn_check_pending(hdev);
59}
60
61static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62{
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71}
72
73static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74{
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85}
86
87static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 struct sk_buff *skb)
89{
90 BT_DBG("%s", hdev->name);
91}
92
93static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94{
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114}
115
116static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117{
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133}
134
135static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136{
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157}
158
159static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161{
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166 if (rp->status)
167 return;
168
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170}
171
172static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
174{
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
177
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
183
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
186
187 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
188}
189
190static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
191{
192 __u8 status = *((__u8 *) skb->data);
193
194 BT_DBG("%s status 0x%2.2x", hdev->name, status);
195
196 clear_bit(HCI_RESET, &hdev->flags);
197
198 hci_req_complete(hdev, HCI_OP_RESET, status);
199
200 /* Reset all non-persistent flags */
201 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
202 BIT(HCI_PERIODIC_INQ));
203
204 hdev->discovery.state = DISCOVERY_STOPPED;
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
207
208 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 hdev->adv_data_len = 0;
210}
211
212static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
213{
214 __u8 status = *((__u8 *) skb->data);
215 void *sent;
216
217 BT_DBG("%s status 0x%2.2x", hdev->name, status);
218
219 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
220 if (!sent)
221 return;
222
223 hci_dev_lock(hdev);
224
225 if (test_bit(HCI_MGMT, &hdev->dev_flags))
226 mgmt_set_local_name_complete(hdev, sent, status);
227 else if (!status)
228 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
229
230 hci_dev_unlock(hdev);
231
232 if (!status && !test_bit(HCI_INIT, &hdev->flags))
233 hci_update_ad(hdev);
234
235 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
236}
237
238static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
239{
240 struct hci_rp_read_local_name *rp = (void *) skb->data;
241
242 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
243
244 if (rp->status)
245 return;
246
247 if (test_bit(HCI_SETUP, &hdev->dev_flags))
248 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
249}
250
251static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
252{
253 __u8 status = *((__u8 *) skb->data);
254 void *sent;
255
256 BT_DBG("%s status 0x%2.2x", hdev->name, status);
257
258 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
259 if (!sent)
260 return;
261
262 if (!status) {
263 __u8 param = *((__u8 *) sent);
264
265 if (param == AUTH_ENABLED)
266 set_bit(HCI_AUTH, &hdev->flags);
267 else
268 clear_bit(HCI_AUTH, &hdev->flags);
269 }
270
271 if (test_bit(HCI_MGMT, &hdev->dev_flags))
272 mgmt_auth_enable_complete(hdev, status);
273
274 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
275}
276
277static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
278{
279 __u8 status = *((__u8 *) skb->data);
280 void *sent;
281
282 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 if (!sent)
286 return;
287
288 if (!status) {
289 __u8 param = *((__u8 *) sent);
290
291 if (param)
292 set_bit(HCI_ENCRYPT, &hdev->flags);
293 else
294 clear_bit(HCI_ENCRYPT, &hdev->flags);
295 }
296
297 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
298}
299
300static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
301{
302 __u8 param, status = *((__u8 *) skb->data);
303 int old_pscan, old_iscan;
304 void *sent;
305
306 BT_DBG("%s status 0x%2.2x", hdev->name, status);
307
308 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
309 if (!sent)
310 return;
311
312 param = *((__u8 *) sent);
313
314 hci_dev_lock(hdev);
315
316 if (status) {
317 mgmt_write_scan_failed(hdev, param, status);
318 hdev->discov_timeout = 0;
319 goto done;
320 }
321
322 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
323 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
324
325 if (param & SCAN_INQUIRY) {
326 set_bit(HCI_ISCAN, &hdev->flags);
327 if (!old_iscan)
328 mgmt_discoverable(hdev, 1);
329 if (hdev->discov_timeout > 0) {
330 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
331 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
332 to);
333 }
334 } else if (old_iscan)
335 mgmt_discoverable(hdev, 0);
336
337 if (param & SCAN_PAGE) {
338 set_bit(HCI_PSCAN, &hdev->flags);
339 if (!old_pscan)
340 mgmt_connectable(hdev, 1);
341 } else if (old_pscan)
342 mgmt_connectable(hdev, 0);
343
344done:
345 hci_dev_unlock(hdev);
346 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
347}
348
349static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350{
351 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
352
353 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
354
355 if (rp->status)
356 return;
357
358 memcpy(hdev->dev_class, rp->dev_class, 3);
359
360 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
361 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
362}
363
364static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
365{
366 __u8 status = *((__u8 *) skb->data);
367 void *sent;
368
369 BT_DBG("%s status 0x%2.2x", hdev->name, status);
370
371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
372 if (!sent)
373 return;
374
375 hci_dev_lock(hdev);
376
377 if (status == 0)
378 memcpy(hdev->dev_class, sent, 3);
379
380 if (test_bit(HCI_MGMT, &hdev->dev_flags))
381 mgmt_set_class_of_dev_complete(hdev, sent, status);
382
383 hci_dev_unlock(hdev);
384}
385
386static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
387{
388 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
389 __u16 setting;
390
391 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
392
393 if (rp->status)
394 return;
395
396 setting = __le16_to_cpu(rp->voice_setting);
397
398 if (hdev->voice_setting == setting)
399 return;
400
401 hdev->voice_setting = setting;
402
403 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
404
405 if (hdev->notify)
406 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407}
408
409static void hci_cc_write_voice_setting(struct hci_dev *hdev,
410 struct sk_buff *skb)
411{
412 __u8 status = *((__u8 *) skb->data);
413 __u16 setting;
414 void *sent;
415
416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
417
418 if (status)
419 return;
420
421 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
422 if (!sent)
423 return;
424
425 setting = get_unaligned_le16(sent);
426
427 if (hdev->voice_setting == setting)
428 return;
429
430 hdev->voice_setting = setting;
431
432 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
433
434 if (hdev->notify)
435 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
436}
437
438static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
439{
440 __u8 status = *((__u8 *) skb->data);
441
442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
443
444 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
445}
446
447static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
448{
449 __u8 status = *((__u8 *) skb->data);
450 struct hci_cp_write_ssp_mode *sent;
451
452 BT_DBG("%s status 0x%2.2x", hdev->name, status);
453
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
455 if (!sent)
456 return;
457
458 if (!status) {
459 if (sent->mode)
460 hdev->host_features[0] |= LMP_HOST_SSP;
461 else
462 hdev->host_features[0] &= ~LMP_HOST_SSP;
463 }
464
465 if (test_bit(HCI_MGMT, &hdev->dev_flags))
466 mgmt_ssp_enable_complete(hdev, sent->mode, status);
467 else if (!status) {
468 if (sent->mode)
469 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
470 else
471 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
472 }
473}
474
475static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
476{
477 struct hci_rp_read_local_version *rp = (void *) skb->data;
478
479 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
480
481 if (rp->status)
482 goto done;
483
484 hdev->hci_ver = rp->hci_ver;
485 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
486 hdev->lmp_ver = rp->lmp_ver;
487 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
488 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
489
490 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
491 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
492
493done:
494 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
495}
496
497static void hci_cc_read_local_commands(struct hci_dev *hdev,
498 struct sk_buff *skb)
499{
500 struct hci_rp_read_local_commands *rp = (void *) skb->data;
501
502 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
503
504 if (!rp->status)
505 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
506
507 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
508}
509
510static void hci_cc_read_local_features(struct hci_dev *hdev,
511 struct sk_buff *skb)
512{
513 struct hci_rp_read_local_features *rp = (void *) skb->data;
514
515 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
516
517 if (rp->status)
518 return;
519
520 memcpy(hdev->features, rp->features, 8);
521
522	/* Adjust default settings according to the features
523	 * supported by the device. */
524
525 if (hdev->features[0] & LMP_3SLOT)
526 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
527
528 if (hdev->features[0] & LMP_5SLOT)
529 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
530
531 if (hdev->features[1] & LMP_HV2) {
532 hdev->pkt_type |= (HCI_HV2);
533 hdev->esco_type |= (ESCO_HV2);
534 }
535
536 if (hdev->features[1] & LMP_HV3) {
537 hdev->pkt_type |= (HCI_HV3);
538 hdev->esco_type |= (ESCO_HV3);
539 }
540
541 if (lmp_esco_capable(hdev))
542 hdev->esco_type |= (ESCO_EV3);
543
544 if (hdev->features[4] & LMP_EV4)
545 hdev->esco_type |= (ESCO_EV4);
546
547 if (hdev->features[4] & LMP_EV5)
548 hdev->esco_type |= (ESCO_EV5);
549
550 if (hdev->features[5] & LMP_EDR_ESCO_2M)
551 hdev->esco_type |= (ESCO_2EV3);
552
553 if (hdev->features[5] & LMP_EDR_ESCO_3M)
554 hdev->esco_type |= (ESCO_3EV3);
555
556 if (hdev->features[5] & LMP_EDR_3S_ESCO)
557 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
558
559 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
560 hdev->features[0], hdev->features[1],
561 hdev->features[2], hdev->features[3],
562 hdev->features[4], hdev->features[5],
563 hdev->features[6], hdev->features[7]);
564}
565
566static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
567 struct sk_buff *skb)
568{
569 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
570
571 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
572
573 if (rp->status)
574 goto done;
575
576 switch (rp->page) {
577 case 0:
578 memcpy(hdev->features, rp->features, 8);
579 break;
580 case 1:
581 memcpy(hdev->host_features, rp->features, 8);
582 break;
583 }
584
585done:
586 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
587}
588
589static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
590 struct sk_buff *skb)
591{
592 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
593
594 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
595
596 if (rp->status)
597 return;
598
599 hdev->flow_ctl_mode = rp->mode;
600
601 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
602}
603
604static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
605{
606 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
607
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
609
610 if (rp->status)
611 return;
612
613 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
614 hdev->sco_mtu = rp->sco_mtu;
615 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
616 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
617
618 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
619 hdev->sco_mtu = 64;
620 hdev->sco_pkts = 8;
621 }
622
623 hdev->acl_cnt = hdev->acl_pkts;
624 hdev->sco_cnt = hdev->sco_pkts;
625
626 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
627 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
628}
629
630static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
631{
632 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
633
634 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
635
636 if (!rp->status)
637 bacpy(&hdev->bdaddr, &rp->bdaddr);
638
639 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
640}
641
642static void hci_cc_read_data_block_size(struct hci_dev *hdev,
643 struct sk_buff *skb)
644{
645 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
646
647 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
648
649 if (rp->status)
650 return;
651
652 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
653 hdev->block_len = __le16_to_cpu(rp->block_len);
654 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
655
656 hdev->block_cnt = hdev->num_blocks;
657
658 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
659 hdev->block_cnt, hdev->block_len);
660
661 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
662}
663
664static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
665{
666 __u8 status = *((__u8 *) skb->data);
667
668 BT_DBG("%s status 0x%2.2x", hdev->name, status);
669
670 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
671}
672
673static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
674 struct sk_buff *skb)
675{
676 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
677
678 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
679
680 if (rp->status)
681 goto a2mp_rsp;
682
683 hdev->amp_status = rp->amp_status;
684 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
685 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
686 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
687 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
688 hdev->amp_type = rp->amp_type;
689 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
690 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
691 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
692 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
693
694 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
695
696a2mp_rsp:
697 a2mp_send_getinfo_rsp(hdev);
698}
699
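/* The local AMP Assoc may not fit into a single Command Complete event;
 * fragments are accumulated in hdev->loc_assoc until the remaining
 * length fits inside the current fragment. */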
700static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
701 struct sk_buff *skb)
702{
703 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
704 struct amp_assoc *assoc = &hdev->loc_assoc;
705 size_t rem_len, frag_len;
706
707 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
708
709 if (rp->status)
710 goto a2mp_rsp;
711
712 frag_len = skb->len - sizeof(*rp);
713 rem_len = __le16_to_cpu(rp->rem_len);
714
715 if (rem_len > frag_len) {
716 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
717
718 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
719 assoc->offset += frag_len;
720
721 /* Read other fragments */
722 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
723
724 return;
725 }
726
727 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
728 assoc->len = assoc->offset + rem_len;
729 assoc->offset = 0;
730
731a2mp_rsp:
732 /* Send A2MP Rsp when all fragments are received */
733 a2mp_send_getampassoc_rsp(hdev, rp->status);
734 a2mp_send_create_phy_link_req(hdev, rp->status);
735}
736
737static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
738 struct sk_buff *skb)
739{
740 __u8 status = *((__u8 *) skb->data);
741
742 BT_DBG("%s status 0x%2.2x", hdev->name, status);
743
744 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
745}
746
747static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
748{
749 __u8 status = *((__u8 *) skb->data);
750
751 BT_DBG("%s status 0x%2.2x", hdev->name, status);
752
753 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
754}
755
756static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
757 struct sk_buff *skb)
758{
759 __u8 status = *((__u8 *) skb->data);
760
761 BT_DBG("%s status 0x%2.2x", hdev->name, status);
762
763 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
764}
765
766static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
767 struct sk_buff *skb)
768{
769 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
770
771 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
772
773 if (!rp->status)
774 hdev->inq_tx_power = rp->tx_power;
775
776 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
777}
778
779static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
780{
781 __u8 status = *((__u8 *) skb->data);
782
783 BT_DBG("%s status 0x%2.2x", hdev->name, status);
784
785 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
786}
787
788static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
789{
790 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
791 struct hci_cp_pin_code_reply *cp;
792 struct hci_conn *conn;
793
794 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
795
796 hci_dev_lock(hdev);
797
798 if (test_bit(HCI_MGMT, &hdev->dev_flags))
799 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
800
801 if (rp->status)
802 goto unlock;
803
804 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
805 if (!cp)
806 goto unlock;
807
808 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
809 if (conn)
810 conn->pin_length = cp->pin_len;
811
812unlock:
813 hci_dev_unlock(hdev);
814}
815
816static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
817{
818 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
819
820 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
821
822 hci_dev_lock(hdev);
823
824 if (test_bit(HCI_MGMT, &hdev->dev_flags))
825 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
826 rp->status);
827
828 hci_dev_unlock(hdev);
829}
830
831static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
832 struct sk_buff *skb)
833{
834 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
835
836 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
837
838 if (rp->status)
839 return;
840
841 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
842 hdev->le_pkts = rp->le_max_pkt;
843
844 hdev->le_cnt = hdev->le_pkts;
845
846 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
847
848 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
849}
850
851static void hci_cc_le_read_local_features(struct hci_dev *hdev,
852 struct sk_buff *skb)
853{
854 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
855
856 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
857
858 if (!rp->status)
859 memcpy(hdev->le_features, rp->features, 8);
860
861 hci_req_complete(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, rp->status);
862}
863
864static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
865 struct sk_buff *skb)
866{
867 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
868
869 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
870
871 if (!rp->status) {
872 hdev->adv_tx_power = rp->tx_power;
873 if (!test_bit(HCI_INIT, &hdev->flags))
874 hci_update_ad(hdev);
875 }
876
877 hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
878}
879
880static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
881{
882 __u8 status = *((__u8 *) skb->data);
883
884 BT_DBG("%s status 0x%2.2x", hdev->name, status);
885
886 hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
887}
888
889static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
890{
891 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
892
893 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
894
895 hci_dev_lock(hdev);
896
897 if (test_bit(HCI_MGMT, &hdev->dev_flags))
898 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
899 rp->status);
900
901 hci_dev_unlock(hdev);
902}
903
904static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
905 struct sk_buff *skb)
906{
907 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
908
909 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
910
911 hci_dev_lock(hdev);
912
913 if (test_bit(HCI_MGMT, &hdev->dev_flags))
914 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
915 ACL_LINK, 0, rp->status);
916
917 hci_dev_unlock(hdev);
918}
919
920static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
921{
922 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
923
924 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
925
926 hci_dev_lock(hdev);
927
928 if (test_bit(HCI_MGMT, &hdev->dev_flags))
929 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
930 0, rp->status);
931
932 hci_dev_unlock(hdev);
933}
934
935static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
936 struct sk_buff *skb)
937{
938 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
939
940 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
941
942 hci_dev_lock(hdev);
943
944 if (test_bit(HCI_MGMT, &hdev->dev_flags))
945 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
946 ACL_LINK, 0, rp->status);
947
948 hci_dev_unlock(hdev);
949}
950
951static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
952 struct sk_buff *skb)
953{
954 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
955
956 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
957
958 hci_dev_lock(hdev);
959 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
960 rp->randomizer, rp->status);
961 hci_dev_unlock(hdev);
962}
963
964static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
965{
966 __u8 *sent, status = *((__u8 *) skb->data);
967
968 BT_DBG("%s status 0x%2.2x", hdev->name, status);
969
970 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
971 if (!sent)
972 return;
973
974 hci_dev_lock(hdev);
975
976 if (!status) {
977 if (*sent)
978 set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
979 else
980 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
981 }
982
983 hci_dev_unlock(hdev);
984
985 if (!test_bit(HCI_INIT, &hdev->flags))
986 hci_update_ad(hdev);
987
988 hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status);
989}
990
991static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
992{
993 __u8 status = *((__u8 *) skb->data);
994
995 BT_DBG("%s status 0x%2.2x", hdev->name, status);
996
997 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
998
999 if (status) {
1000 hci_dev_lock(hdev);
1001 mgmt_start_discovery_failed(hdev, status);
1002 hci_dev_unlock(hdev);
1003 return;
1004 }
1005}
1006
1007static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1008 struct sk_buff *skb)
1009{
1010 struct hci_cp_le_set_scan_enable *cp;
1011 __u8 status = *((__u8 *) skb->data);
1012
1013 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1014
1015 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1016 if (!cp)
1017 return;
1018
1019 switch (cp->enable) {
1020 case LE_SCANNING_ENABLED:
1021 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1022
1023 if (status) {
1024 hci_dev_lock(hdev);
1025 mgmt_start_discovery_failed(hdev, status);
1026 hci_dev_unlock(hdev);
1027 return;
1028 }
1029
1030 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1031
1032 hci_dev_lock(hdev);
1033 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1034 hci_dev_unlock(hdev);
1035 break;
1036
1037 case LE_SCANNING_DISABLED:
1038 if (status) {
1039 hci_dev_lock(hdev);
1040 mgmt_stop_discovery_failed(hdev, status);
1041 hci_dev_unlock(hdev);
1042 return;
1043 }
1044
1045 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1046
1047 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1048 hdev->discovery.state == DISCOVERY_FINDING) {
1049 mgmt_interleaved_discovery(hdev);
1050 } else {
1051 hci_dev_lock(hdev);
1052 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1053 hci_dev_unlock(hdev);
1054 }
1055
1056 break;
1057
1058 default:
1059 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1060 break;
1061 }
1062}
1063
1064static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1065 struct sk_buff *skb)
1066{
1067 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1068
1069 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1070
1071 if (!rp->status)
1072 hdev->le_white_list_size = rp->size;
1073
1074 hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status);
1075}
1076
1077static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1078{
1079 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1080
1081 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1082
1083 if (rp->status)
1084 return;
1085
1086 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1087}
1088
1089static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1090{
1091 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1092
1093 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1094
1095 if (rp->status)
1096 return;
1097
1098 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1099}
1100
1101static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1102 struct sk_buff *skb)
1103{
1104 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1105
1106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1107
1108 if (!rp->status)
1109 memcpy(hdev->le_states, rp->le_states, 8);
1110
1111 hci_req_complete(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, rp->status);
1112}
1113
1114static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1115 struct sk_buff *skb)
1116{
1117 struct hci_cp_write_le_host_supported *sent;
1118 __u8 status = *((__u8 *) skb->data);
1119
1120 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1121
1122 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1123 if (!sent)
1124 return;
1125
1126 if (!status) {
1127 if (sent->le)
1128 hdev->host_features[0] |= LMP_HOST_LE;
1129 else
1130 hdev->host_features[0] &= ~LMP_HOST_LE;
1131
1132 if (sent->simul)
1133 hdev->host_features[0] |= LMP_HOST_LE_BREDR;
1134 else
1135 hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
1136 }
1137
1138 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1139 !test_bit(HCI_INIT, &hdev->flags))
1140 mgmt_le_enable_complete(hdev, sent->le, status);
1141
1142 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1143}
1144
1145static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1146 struct sk_buff *skb)
1147{
1148 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1149
1150 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1151 hdev->name, rp->status, rp->phy_handle);
1152
1153 if (rp->status)
1154 return;
1155
1156 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1157}
1158
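/* Command Status handlers (hci_cs_*). These receive only the status
 * byte from the Command Status event generated for the command that
 * was sent; most of them clean up connection state when the command
 * failed. */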
1159static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1160{
1161 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1162
1163 if (status) {
1164 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1165 hci_conn_check_pending(hdev);
1166 hci_dev_lock(hdev);
1167 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1168 mgmt_start_discovery_failed(hdev, status);
1169 hci_dev_unlock(hdev);
1170 return;
1171 }
1172
1173 set_bit(HCI_INQUIRY, &hdev->flags);
1174
1175 hci_dev_lock(hdev);
1176 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1177 hci_dev_unlock(hdev);
1178}
1179
1180static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1181{
1182 struct hci_cp_create_conn *cp;
1183 struct hci_conn *conn;
1184
1185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1186
1187 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1188 if (!cp)
1189 return;
1190
1191 hci_dev_lock(hdev);
1192
1193 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1194
1195 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1196
1197 if (status) {
1198 if (conn && conn->state == BT_CONNECT) {
1199 if (status != 0x0c || conn->attempt > 2) {
1200 conn->state = BT_CLOSED;
1201 hci_proto_connect_cfm(conn, status);
1202 hci_conn_del(conn);
1203 } else
1204 conn->state = BT_CONNECT2;
1205 }
1206 } else {
1207 if (!conn) {
1208 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1209 if (conn) {
1210 conn->out = true;
1211 conn->link_mode |= HCI_LM_MASTER;
1212 } else
1213 BT_ERR("No memory for new connection");
1214 }
1215 }
1216
1217 hci_dev_unlock(hdev);
1218}
1219
1220static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1221{
1222 struct hci_cp_add_sco *cp;
1223 struct hci_conn *acl, *sco;
1224 __u16 handle;
1225
1226 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1227
1228 if (!status)
1229 return;
1230
1231 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1232 if (!cp)
1233 return;
1234
1235 handle = __le16_to_cpu(cp->handle);
1236
1237 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1238
1239 hci_dev_lock(hdev);
1240
1241 acl = hci_conn_hash_lookup_handle(hdev, handle);
1242 if (acl) {
1243 sco = acl->link;
1244 if (sco) {
1245 sco->state = BT_CLOSED;
1246
1247 hci_proto_connect_cfm(sco, status);
1248 hci_conn_del(sco);
1249 }
1250 }
1251
1252 hci_dev_unlock(hdev);
1253}
1254
1255static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1256{
1257 struct hci_cp_auth_requested *cp;
1258 struct hci_conn *conn;
1259
1260 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1261
1262 if (!status)
1263 return;
1264
1265 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1266 if (!cp)
1267 return;
1268
1269 hci_dev_lock(hdev);
1270
1271 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1272 if (conn) {
1273 if (conn->state == BT_CONFIG) {
1274 hci_proto_connect_cfm(conn, status);
1275 hci_conn_put(conn);
1276 }
1277 }
1278
1279 hci_dev_unlock(hdev);
1280}
1281
1282static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1283{
1284 struct hci_cp_set_conn_encrypt *cp;
1285 struct hci_conn *conn;
1286
1287 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1288
1289 if (!status)
1290 return;
1291
1292 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1293 if (!cp)
1294 return;
1295
1296 hci_dev_lock(hdev);
1297
1298 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1299 if (conn) {
1300 if (conn->state == BT_CONFIG) {
1301 hci_proto_connect_cfm(conn, status);
1302 hci_conn_put(conn);
1303 }
1304 }
1305
1306 hci_dev_unlock(hdev);
1307}
1308
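/* Return nonzero when an outgoing connection that is still in the
 * BT_CONFIG state needs authentication before it can be reported as
 * connected. */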
1309static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1310 struct hci_conn *conn)
1311{
1312 if (conn->state != BT_CONFIG || !conn->out)
1313 return 0;
1314
1315 if (conn->pending_sec_level == BT_SECURITY_SDP)
1316 return 0;
1317
1318 /* Only request authentication for SSP connections or non-SSP
1319 * devices with sec_level HIGH or if MITM protection is requested */
1320 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1321 conn->pending_sec_level != BT_SECURITY_HIGH)
1322 return 0;
1323
1324 return 1;
1325}
1326
1327static int hci_resolve_name(struct hci_dev *hdev,
1328 struct inquiry_entry *e)
1329{
1330 struct hci_cp_remote_name_req cp;
1331
1332 memset(&cp, 0, sizeof(cp));
1333
1334 bacpy(&cp.bdaddr, &e->data.bdaddr);
1335 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1336 cp.pscan_mode = e->data.pscan_mode;
1337 cp.clock_offset = e->data.clock_offset;
1338
1339 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1340}
1341
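/* Pick the next inquiry cache entry whose name is still needed and
 * send a Remote Name Request for it. Returns true if a request was
 * sent. */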
1342static bool hci_resolve_next_name(struct hci_dev *hdev)
1343{
1344 struct discovery_state *discov = &hdev->discovery;
1345 struct inquiry_entry *e;
1346
1347 if (list_empty(&discov->resolve))
1348 return false;
1349
1350 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1351 if (!e)
1352 return false;
1353
1354 if (hci_resolve_name(hdev, e) == 0) {
1355 e->name_state = NAME_PENDING;
1356 return true;
1357 }
1358
1359 return false;
1360}
1361
1362static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1363 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1364{
1365 struct discovery_state *discov = &hdev->discovery;
1366 struct inquiry_entry *e;
1367
1368 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1369 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1370 name_len, conn->dev_class);
1371
1372 if (discov->state == DISCOVERY_STOPPED)
1373 return;
1374
1375 if (discov->state == DISCOVERY_STOPPING)
1376 goto discov_complete;
1377
1378 if (discov->state != DISCOVERY_RESOLVING)
1379 return;
1380
1381 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1382	/* If the device was not found in the list of found devices whose
1383	 * names are pending, there is no need to continue resolving the next
1384	 * name, as that will be done upon receiving another Remote Name
1385	 * Request Complete event. */
1386 if (!e)
1387 return;
1388
1389 list_del(&e->list);
1390 if (name) {
1391 e->name_state = NAME_KNOWN;
1392 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1393 e->data.rssi, name, name_len);
1394 } else {
1395 e->name_state = NAME_NOT_KNOWN;
1396 }
1397
1398 if (hci_resolve_next_name(hdev))
1399 return;
1400
1401discov_complete:
1402 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1403}
1404
1405static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1406{
1407 struct hci_cp_remote_name_req *cp;
1408 struct hci_conn *conn;
1409
1410 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1411
1412	/* If successful, wait for the Remote Name Request Complete event
1413	 * before checking whether authentication is needed */
1414 if (!status)
1415 return;
1416
1417 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1418 if (!cp)
1419 return;
1420
1421 hci_dev_lock(hdev);
1422
1423 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1424
1425 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1426 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1427
1428 if (!conn)
1429 goto unlock;
1430
1431 if (!hci_outgoing_auth_needed(hdev, conn))
1432 goto unlock;
1433
1434 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1435 struct hci_cp_auth_requested cp;
1436 cp.handle = __cpu_to_le16(conn->handle);
1437 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1438 }
1439
1440unlock:
1441 hci_dev_unlock(hdev);
1442}
1443
1444static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1445{
1446 struct hci_cp_read_remote_features *cp;
1447 struct hci_conn *conn;
1448
1449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1450
1451 if (!status)
1452 return;
1453
1454 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1455 if (!cp)
1456 return;
1457
1458 hci_dev_lock(hdev);
1459
1460 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1461 if (conn) {
1462 if (conn->state == BT_CONFIG) {
1463 hci_proto_connect_cfm(conn, status);
1464 hci_conn_put(conn);
1465 }
1466 }
1467
1468 hci_dev_unlock(hdev);
1469}
1470
1471static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1472{
1473 struct hci_cp_read_remote_ext_features *cp;
1474 struct hci_conn *conn;
1475
1476 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1477
1478 if (!status)
1479 return;
1480
1481 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1482 if (!cp)
1483 return;
1484
1485 hci_dev_lock(hdev);
1486
1487 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1488 if (conn) {
1489 if (conn->state == BT_CONFIG) {
1490 hci_proto_connect_cfm(conn, status);
1491 hci_conn_put(conn);
1492 }
1493 }
1494
1495 hci_dev_unlock(hdev);
1496}
1497
1498static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1499{
1500 struct hci_cp_setup_sync_conn *cp;
1501 struct hci_conn *acl, *sco;
1502 __u16 handle;
1503
1504 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1505
1506 if (!status)
1507 return;
1508
1509 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1510 if (!cp)
1511 return;
1512
1513 handle = __le16_to_cpu(cp->handle);
1514
1515 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1516
1517 hci_dev_lock(hdev);
1518
1519 acl = hci_conn_hash_lookup_handle(hdev, handle);
1520 if (acl) {
1521 sco = acl->link;
1522 if (sco) {
1523 sco->state = BT_CLOSED;
1524
1525 hci_proto_connect_cfm(sco, status);
1526 hci_conn_del(sco);
1527 }
1528 }
1529
1530 hci_dev_unlock(hdev);
1531}
1532
1533static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1534{
1535 struct hci_cp_sniff_mode *cp;
1536 struct hci_conn *conn;
1537
1538 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1539
1540 if (!status)
1541 return;
1542
1543 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1544 if (!cp)
1545 return;
1546
1547 hci_dev_lock(hdev);
1548
1549 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1550 if (conn) {
1551 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1552
1553 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1554 hci_sco_setup(conn, status);
1555 }
1556
1557 hci_dev_unlock(hdev);
1558}
1559
1560static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1561{
1562 struct hci_cp_exit_sniff_mode *cp;
1563 struct hci_conn *conn;
1564
1565 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1566
1567 if (!status)
1568 return;
1569
1570 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1571 if (!cp)
1572 return;
1573
1574 hci_dev_lock(hdev);
1575
1576 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1577 if (conn) {
1578 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1579
1580 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1581 hci_sco_setup(conn, status);
1582 }
1583
1584 hci_dev_unlock(hdev);
1585}
1586
1587static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1588{
1589 struct hci_cp_disconnect *cp;
1590 struct hci_conn *conn;
1591
1592 if (!status)
1593 return;
1594
1595 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1596 if (!cp)
1597 return;
1598
1599 hci_dev_lock(hdev);
1600
1601 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1602 if (conn)
1603 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1604 conn->dst_type, status);
1605
1606 hci_dev_unlock(hdev);
1607}
1608
1609static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1610{
1611 struct hci_conn *conn;
1612
1613 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1614
1615 if (status) {
1616 hci_dev_lock(hdev);
1617
1618 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1619 if (!conn) {
1620 hci_dev_unlock(hdev);
1621 return;
1622 }
1623
1624 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1625
1626 conn->state = BT_CLOSED;
1627 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1628 conn->dst_type, status);
1629 hci_proto_connect_cfm(conn, status);
1630 hci_conn_del(conn);
1631
1632 hci_dev_unlock(hdev);
1633 }
1634}
1635
1636static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1637{
1638 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1639}
1640
1641static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1642{
1643 struct hci_cp_create_phy_link *cp;
1644
1645 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1646
1647 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1648 if (!cp)
1649 return;
1650
1651 hci_dev_lock(hdev);
1652
1653 if (status) {
1654 struct hci_conn *hcon;
1655
1656 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1657 if (hcon)
1658 hci_conn_del(hcon);
1659 } else {
1660 amp_write_remote_assoc(hdev, cp->phy_handle);
1661 }
1662
1663 hci_dev_unlock(hdev);
1664}
1665
1666static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1667{
1668 struct hci_cp_accept_phy_link *cp;
1669
1670 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1671
1672 if (status)
1673 return;
1674
1675 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1676 if (!cp)
1677 return;
1678
1679 amp_write_remote_assoc(hdev, cp->phy_handle);
1680}
1681
1682static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status)
1683{
1684 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1685}
1686
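/* Handlers for asynchronous HCI events, dispatched by event code from
 * hci_event_packet(). */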
1687static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1688{
1689 __u8 status = *((__u8 *) skb->data);
1690 struct discovery_state *discov = &hdev->discovery;
1691 struct inquiry_entry *e;
1692
1693 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1694
1695 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1696
1697 hci_conn_check_pending(hdev);
1698
1699 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1700 return;
1701
1702 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1703 return;
1704
1705 hci_dev_lock(hdev);
1706
1707 if (discov->state != DISCOVERY_FINDING)
1708 goto unlock;
1709
1710 if (list_empty(&discov->resolve)) {
1711 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1712 goto unlock;
1713 }
1714
1715 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1716 if (e && hci_resolve_name(hdev, e) == 0) {
1717 e->name_state = NAME_PENDING;
1718 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1719 } else {
1720 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1721 }
1722
1723unlock:
1724 hci_dev_unlock(hdev);
1725}
1726
1727static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1728{
1729 struct inquiry_data data;
1730 struct inquiry_info *info = (void *) (skb->data + 1);
1731 int num_rsp = *((__u8 *) skb->data);
1732
1733 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1734
1735 if (!num_rsp)
1736 return;
1737
1738 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1739 return;
1740
1741 hci_dev_lock(hdev);
1742
1743 for (; num_rsp; num_rsp--, info++) {
1744 bool name_known, ssp;
1745
1746 bacpy(&data.bdaddr, &info->bdaddr);
1747 data.pscan_rep_mode = info->pscan_rep_mode;
1748 data.pscan_period_mode = info->pscan_period_mode;
1749 data.pscan_mode = info->pscan_mode;
1750 memcpy(data.dev_class, info->dev_class, 3);
1751 data.clock_offset = info->clock_offset;
1752 data.rssi = 0x00;
1753 data.ssp_mode = 0x00;
1754
1755 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1756 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1757 info->dev_class, 0, !name_known, ssp, NULL,
1758 0);
1759 }
1760
1761 hci_dev_unlock(hdev);
1762}
1763
1764static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1765{
1766 struct hci_ev_conn_complete *ev = (void *) skb->data;
1767 struct hci_conn *conn;
1768
1769 BT_DBG("%s", hdev->name);
1770
1771 hci_dev_lock(hdev);
1772
1773 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1774 if (!conn) {
1775 if (ev->link_type != SCO_LINK)
1776 goto unlock;
1777
1778 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1779 if (!conn)
1780 goto unlock;
1781
1782 conn->type = SCO_LINK;
1783 }
1784
1785 if (!ev->status) {
1786 conn->handle = __le16_to_cpu(ev->handle);
1787
1788 if (conn->type == ACL_LINK) {
1789 conn->state = BT_CONFIG;
1790 hci_conn_hold(conn);
1791
1792 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1793 !hci_find_link_key(hdev, &ev->bdaddr))
1794 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1795 else
1796 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1797 } else
1798 conn->state = BT_CONNECTED;
1799
1800 hci_conn_hold_device(conn);
1801 hci_conn_add_sysfs(conn);
1802
1803 if (test_bit(HCI_AUTH, &hdev->flags))
1804 conn->link_mode |= HCI_LM_AUTH;
1805
1806 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1807 conn->link_mode |= HCI_LM_ENCRYPT;
1808
1809 /* Get remote features */
1810 if (conn->type == ACL_LINK) {
1811 struct hci_cp_read_remote_features cp;
1812 cp.handle = ev->handle;
1813 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1814 sizeof(cp), &cp);
1815 }
1816
1817 /* Set packet type for incoming connection */
1818 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1819 struct hci_cp_change_conn_ptype cp;
1820 cp.handle = ev->handle;
1821 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1822 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1823 &cp);
1824 }
1825 } else {
1826 conn->state = BT_CLOSED;
1827 if (conn->type == ACL_LINK)
1828 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1829 conn->dst_type, ev->status);
1830 }
1831
1832 if (conn->type == ACL_LINK)
1833 hci_sco_setup(conn, ev->status);
1834
1835 if (ev->status) {
1836 hci_proto_connect_cfm(conn, ev->status);
1837 hci_conn_del(conn);
1838 } else if (ev->link_type != ACL_LINK)
1839 hci_proto_connect_cfm(conn, ev->status);
1840
1841unlock:
1842 hci_dev_unlock(hdev);
1843
1844 hci_conn_check_pending(hdev);
1845}
1846
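/* Accept a previously deferred incoming connection. Controllers
 * without eSCO support get HCI_OP_ACCEPT_CONN_REQ; eSCO capable ones
 * get HCI_OP_ACCEPT_SYNC_CONN_REQ with default bandwidth and latency
 * parameters. */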
1847void hci_conn_accept(struct hci_conn *conn, int mask)
1848{
1849 struct hci_dev *hdev = conn->hdev;
1850
1851 BT_DBG("conn %p", conn);
1852
1853 conn->state = BT_CONFIG;
1854
1855 if (!lmp_esco_capable(hdev)) {
1856 struct hci_cp_accept_conn_req cp;
1857
1858 bacpy(&cp.bdaddr, &conn->dst);
1859
1860 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1861 cp.role = 0x00; /* Become master */
1862 else
1863 cp.role = 0x01; /* Remain slave */
1864
1865 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
1866	} else /* lmp_esco_capable(hdev) */ {
1867 struct hci_cp_accept_sync_conn_req cp;
1868
1869 bacpy(&cp.bdaddr, &conn->dst);
1870 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1871
1872 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1873 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1874 cp.max_latency = __constant_cpu_to_le16(0xffff);
1875 cp.content_format = cpu_to_le16(hdev->voice_setting);
1876 cp.retrans_effort = 0xff;
1877
1878 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1879 sizeof(cp), &cp);
1880 }
1881}
1882
1883static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1884{
1885 struct hci_ev_conn_request *ev = (void *) skb->data;
1886 int mask = hdev->link_mode;
1887 __u8 flags = 0;
1888
1889 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1890 ev->link_type);
1891
1892 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1893 &flags);
1894
1895 if ((mask & HCI_LM_ACCEPT) &&
1896 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1897 /* Connection accepted */
1898 struct inquiry_entry *ie;
1899 struct hci_conn *conn;
1900
1901 hci_dev_lock(hdev);
1902
1903 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1904 if (ie)
1905 memcpy(ie->data.dev_class, ev->dev_class, 3);
1906
1907 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1908 &ev->bdaddr);
1909 if (!conn) {
1910 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1911 if (!conn) {
1912 BT_ERR("No memory for new connection");
1913 hci_dev_unlock(hdev);
1914 return;
1915 }
1916 }
1917
1918 memcpy(conn->dev_class, ev->dev_class, 3);
1919
1920 hci_dev_unlock(hdev);
1921
1922 if (ev->link_type == ACL_LINK ||
1923 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1924 struct hci_cp_accept_conn_req cp;
1925 conn->state = BT_CONNECT;
1926
1927 bacpy(&cp.bdaddr, &ev->bdaddr);
1928
1929 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1930 cp.role = 0x00; /* Become master */
1931 else
1932 cp.role = 0x01; /* Remain slave */
1933
1934 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1935 &cp);
1936 } else if (!(flags & HCI_PROTO_DEFER)) {
1937 struct hci_cp_accept_sync_conn_req cp;
1938 conn->state = BT_CONNECT;
1939
1940 bacpy(&cp.bdaddr, &ev->bdaddr);
1941 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1942
1943 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1944 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1945 cp.max_latency = __constant_cpu_to_le16(0xffff);
1946 cp.content_format = cpu_to_le16(hdev->voice_setting);
1947 cp.retrans_effort = 0xff;
1948
1949 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1950 sizeof(cp), &cp);
1951 } else {
1952 conn->state = BT_CONNECT2;
1953 hci_proto_connect_cfm(conn, 0);
1954 hci_conn_put(conn);
1955 }
1956 } else {
1957 /* Connection rejected */
1958 struct hci_cp_reject_conn_req cp;
1959
1960 bacpy(&cp.bdaddr, &ev->bdaddr);
1961 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1962 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1963 }
1964}
1965
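/* Map an HCI disconnect reason code to the corresponding mgmt
 * disconnect reason reported to user space. */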
1966static u8 hci_to_mgmt_reason(u8 err)
1967{
1968 switch (err) {
1969 case HCI_ERROR_CONNECTION_TIMEOUT:
1970 return MGMT_DEV_DISCONN_TIMEOUT;
1971 case HCI_ERROR_REMOTE_USER_TERM:
1972 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1973 case HCI_ERROR_REMOTE_POWER_OFF:
1974 return MGMT_DEV_DISCONN_REMOTE;
1975 case HCI_ERROR_LOCAL_HOST_TERM:
1976 return MGMT_DEV_DISCONN_LOCAL_HOST;
1977 default:
1978 return MGMT_DEV_DISCONN_UNKNOWN;
1979 }
1980}
1981
1982static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1983{
1984 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1985 struct hci_conn *conn;
1986
1987 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1988
1989 hci_dev_lock(hdev);
1990
1991 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1992 if (!conn)
1993 goto unlock;
1994
1995 if (ev->status == 0)
1996 conn->state = BT_CLOSED;
1997
1998 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1999 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
2000 if (ev->status) {
2001 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2002 conn->dst_type, ev->status);
2003 } else {
2004 u8 reason = hci_to_mgmt_reason(ev->reason);
2005
2006 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
2007 conn->dst_type, reason);
2008 }
2009 }
2010
2011 if (ev->status == 0) {
2012 if (conn->type == ACL_LINK && conn->flush_key)
2013 hci_remove_link_key(hdev, &conn->dst);
2014 hci_proto_disconn_cfm(conn, ev->reason);
2015 hci_conn_del(conn);
2016 }
2017
2018unlock:
2019 hci_dev_unlock(hdev);
2020}
2021
2022static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2023{
2024 struct hci_ev_auth_complete *ev = (void *) skb->data;
2025 struct hci_conn *conn;
2026
2027 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2028
2029 hci_dev_lock(hdev);
2030
2031 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2032 if (!conn)
2033 goto unlock;
2034
2035 if (!ev->status) {
2036 if (!hci_conn_ssp_enabled(conn) &&
2037 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2038 BT_INFO("re-auth of legacy device is not possible.");
2039 } else {
2040 conn->link_mode |= HCI_LM_AUTH;
2041 conn->sec_level = conn->pending_sec_level;
2042 }
2043 } else {
2044 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2045 ev->status);
2046 }
2047
2048 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2049 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2050
2051 if (conn->state == BT_CONFIG) {
2052 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2053 struct hci_cp_set_conn_encrypt cp;
2054 cp.handle = ev->handle;
2055 cp.encrypt = 0x01;
2056 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2057 &cp);
2058 } else {
2059 conn->state = BT_CONNECTED;
2060 hci_proto_connect_cfm(conn, ev->status);
2061 hci_conn_put(conn);
2062 }
2063 } else {
2064 hci_auth_cfm(conn, ev->status);
2065
2066 hci_conn_hold(conn);
2067 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2068 hci_conn_put(conn);
2069 }
2070
2071 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2072 if (!ev->status) {
2073 struct hci_cp_set_conn_encrypt cp;
2074 cp.handle = ev->handle;
2075 cp.encrypt = 0x01;
2076 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2077 &cp);
2078 } else {
2079 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2080 hci_encrypt_cfm(conn, ev->status, 0x00);
2081 }
2082 }
2083
2084unlock:
2085 hci_dev_unlock(hdev);
2086}
2087
2088static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2089{
2090 struct hci_ev_remote_name *ev = (void *) skb->data;
2091 struct hci_conn *conn;
2092
2093 BT_DBG("%s", hdev->name);
2094
2095 hci_conn_check_pending(hdev);
2096
2097 hci_dev_lock(hdev);
2098
2099 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2100
2101 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2102 goto check_auth;
2103
2104 if (ev->status == 0)
2105 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2106 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2107 else
2108 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2109
2110check_auth:
2111 if (!conn)
2112 goto unlock;
2113
2114 if (!hci_outgoing_auth_needed(hdev, conn))
2115 goto unlock;
2116
2117 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2118 struct hci_cp_auth_requested cp;
2119 cp.handle = __cpu_to_le16(conn->handle);
2120 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2121 }
2122
2123unlock:
2124 hci_dev_unlock(hdev);
2125}
2126
2127static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2128{
2129 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2130 struct hci_conn *conn;
2131
2132 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2133
2134 hci_dev_lock(hdev);
2135
2136 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2137 if (conn) {
2138 if (!ev->status) {
2139 if (ev->encrypt) {
2140 /* Encryption implies authentication */
2141 conn->link_mode |= HCI_LM_AUTH;
2142 conn->link_mode |= HCI_LM_ENCRYPT;
2143 conn->sec_level = conn->pending_sec_level;
2144 } else
2145 conn->link_mode &= ~HCI_LM_ENCRYPT;
2146 }
2147
2148 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2149
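 /* A failed encryption change on an established link is treated as an authentication failure and the link is dropped */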
2150 if (ev->status && conn->state == BT_CONNECTED) {
2151 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2152 hci_conn_put(conn);
2153 goto unlock;
2154 }
2155
2156 if (conn->state == BT_CONFIG) {
2157 if (!ev->status)
2158 conn->state = BT_CONNECTED;
2159
2160 hci_proto_connect_cfm(conn, ev->status);
2161 hci_conn_put(conn);
2162 } else
2163 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2164 }
2165
2166unlock:
2167 hci_dev_unlock(hdev);
2168}
2169
2170static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2171 struct sk_buff *skb)
2172{
2173 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2174 struct hci_conn *conn;
2175
2176 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2177
2178 hci_dev_lock(hdev);
2179
2180 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2181 if (conn) {
2182 if (!ev->status)
2183 conn->link_mode |= HCI_LM_SECURE;
2184
2185 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2186
2187 hci_key_change_cfm(conn, ev->status);
2188 }
2189
2190 hci_dev_unlock(hdev);
2191}
2192
2193static void hci_remote_features_evt(struct hci_dev *hdev,
2194 struct sk_buff *skb)
2195{
2196 struct hci_ev_remote_features *ev = (void *) skb->data;
2197 struct hci_conn *conn;
2198
2199 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2200
2201 hci_dev_lock(hdev);
2202
2203 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2204 if (!conn)
2205 goto unlock;
2206
2207 if (!ev->status)
2208 memcpy(conn->features, ev->features, 8);
2209
2210 if (conn->state != BT_CONFIG)
2211 goto unlock;
2212
2213 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2214 struct hci_cp_read_remote_ext_features cp;
2215 cp.handle = ev->handle;
2216 cp.page = 0x01;
2217 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2218 sizeof(cp), &cp);
2219 goto unlock;
2220 }
2221
2222 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2223 struct hci_cp_remote_name_req cp;
2224 memset(&cp, 0, sizeof(cp));
2225 bacpy(&cp.bdaddr, &conn->dst);
2226 cp.pscan_rep_mode = 0x02;
2227 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2228 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2229 mgmt_device_connected(hdev, &conn->dst, conn->type,
2230 conn->dst_type, 0, NULL, 0,
2231 conn->dev_class);
2232
2233 if (!hci_outgoing_auth_needed(hdev, conn)) {
2234 conn->state = BT_CONNECTED;
2235 hci_proto_connect_cfm(conn, ev->status);
2236 hci_conn_put(conn);
2237 }
2238
2239unlock:
2240 hci_dev_unlock(hdev);
2241}
2242
2243static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2244{
2245 BT_DBG("%s", hdev->name);
2246}
2247
2248static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2249 struct sk_buff *skb)
2250{
2251 BT_DBG("%s", hdev->name);
2252}
2253
2254static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2255{
2256 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2257 __u16 opcode;
2258
2259 skb_pull(skb, sizeof(*ev));
2260
2261 opcode = __le16_to_cpu(ev->opcode);
2262
2263 switch (opcode) {
2264 case HCI_OP_INQUIRY_CANCEL:
2265 hci_cc_inquiry_cancel(hdev, skb);
2266 break;
2267
2268 case HCI_OP_PERIODIC_INQ:
2269 hci_cc_periodic_inq(hdev, skb);
2270 break;
2271
2272 case HCI_OP_EXIT_PERIODIC_INQ:
2273 hci_cc_exit_periodic_inq(hdev, skb);
2274 break;
2275
2276 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2277 hci_cc_remote_name_req_cancel(hdev, skb);
2278 break;
2279
2280 case HCI_OP_ROLE_DISCOVERY:
2281 hci_cc_role_discovery(hdev, skb);
2282 break;
2283
2284 case HCI_OP_READ_LINK_POLICY:
2285 hci_cc_read_link_policy(hdev, skb);
2286 break;
2287
2288 case HCI_OP_WRITE_LINK_POLICY:
2289 hci_cc_write_link_policy(hdev, skb);
2290 break;
2291
2292 case HCI_OP_READ_DEF_LINK_POLICY:
2293 hci_cc_read_def_link_policy(hdev, skb);
2294 break;
2295
2296 case HCI_OP_WRITE_DEF_LINK_POLICY:
2297 hci_cc_write_def_link_policy(hdev, skb);
2298 break;
2299
2300 case HCI_OP_RESET:
2301 hci_cc_reset(hdev, skb);
2302 break;
2303
2304 case HCI_OP_WRITE_LOCAL_NAME:
2305 hci_cc_write_local_name(hdev, skb);
2306 break;
2307
2308 case HCI_OP_READ_LOCAL_NAME:
2309 hci_cc_read_local_name(hdev, skb);
2310 break;
2311
2312 case HCI_OP_WRITE_AUTH_ENABLE:
2313 hci_cc_write_auth_enable(hdev, skb);
2314 break;
2315
2316 case HCI_OP_WRITE_ENCRYPT_MODE:
2317 hci_cc_write_encrypt_mode(hdev, skb);
2318 break;
2319
2320 case HCI_OP_WRITE_SCAN_ENABLE:
2321 hci_cc_write_scan_enable(hdev, skb);
2322 break;
2323
2324 case HCI_OP_READ_CLASS_OF_DEV:
2325 hci_cc_read_class_of_dev(hdev, skb);
2326 break;
2327
2328 case HCI_OP_WRITE_CLASS_OF_DEV:
2329 hci_cc_write_class_of_dev(hdev, skb);
2330 break;
2331
2332 case HCI_OP_READ_VOICE_SETTING:
2333 hci_cc_read_voice_setting(hdev, skb);
2334 break;
2335
2336 case HCI_OP_WRITE_VOICE_SETTING:
2337 hci_cc_write_voice_setting(hdev, skb);
2338 break;
2339
2340 case HCI_OP_HOST_BUFFER_SIZE:
2341 hci_cc_host_buffer_size(hdev, skb);
2342 break;
2343
2344 case HCI_OP_WRITE_SSP_MODE:
2345 hci_cc_write_ssp_mode(hdev, skb);
2346 break;
2347
2348 case HCI_OP_READ_LOCAL_VERSION:
2349 hci_cc_read_local_version(hdev, skb);
2350 break;
2351
2352 case HCI_OP_READ_LOCAL_COMMANDS:
2353 hci_cc_read_local_commands(hdev, skb);
2354 break;
2355
2356 case HCI_OP_READ_LOCAL_FEATURES:
2357 hci_cc_read_local_features(hdev, skb);
2358 break;
2359
2360 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2361 hci_cc_read_local_ext_features(hdev, skb);
2362 break;
2363
2364 case HCI_OP_READ_BUFFER_SIZE:
2365 hci_cc_read_buffer_size(hdev, skb);
2366 break;
2367
2368 case HCI_OP_READ_BD_ADDR:
2369 hci_cc_read_bd_addr(hdev, skb);
2370 break;
2371
2372 case HCI_OP_READ_DATA_BLOCK_SIZE:
2373 hci_cc_read_data_block_size(hdev, skb);
2374 break;
2375
2376 case HCI_OP_WRITE_CA_TIMEOUT:
2377 hci_cc_write_ca_timeout(hdev, skb);
2378 break;
2379
2380 case HCI_OP_READ_FLOW_CONTROL_MODE:
2381 hci_cc_read_flow_control_mode(hdev, skb);
2382 break;
2383
2384 case HCI_OP_READ_LOCAL_AMP_INFO:
2385 hci_cc_read_local_amp_info(hdev, skb);
2386 break;
2387
2388 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2389 hci_cc_read_local_amp_assoc(hdev, skb);
2390 break;
2391
2392 case HCI_OP_DELETE_STORED_LINK_KEY:
2393 hci_cc_delete_stored_link_key(hdev, skb);
2394 break;
2395
2396 case HCI_OP_SET_EVENT_MASK:
2397 hci_cc_set_event_mask(hdev, skb);
2398 break;
2399
2400 case HCI_OP_WRITE_INQUIRY_MODE:
2401 hci_cc_write_inquiry_mode(hdev, skb);
2402 break;
2403
2404 case HCI_OP_READ_INQ_RSP_TX_POWER:
2405 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2406 break;
2407
2408 case HCI_OP_SET_EVENT_FLT:
2409 hci_cc_set_event_flt(hdev, skb);
2410 break;
2411
2412 case HCI_OP_PIN_CODE_REPLY:
2413 hci_cc_pin_code_reply(hdev, skb);
2414 break;
2415
2416 case HCI_OP_PIN_CODE_NEG_REPLY:
2417 hci_cc_pin_code_neg_reply(hdev, skb);
2418 break;
2419
2420 case HCI_OP_READ_LOCAL_OOB_DATA:
2421 hci_cc_read_local_oob_data_reply(hdev, skb);
2422 break;
2423
2424 case HCI_OP_LE_READ_BUFFER_SIZE:
2425 hci_cc_le_read_buffer_size(hdev, skb);
2426 break;
2427
2428 case HCI_OP_LE_READ_LOCAL_FEATURES:
2429 hci_cc_le_read_local_features(hdev, skb);
2430 break;
2431
2432 case HCI_OP_LE_READ_ADV_TX_POWER:
2433 hci_cc_le_read_adv_tx_power(hdev, skb);
2434 break;
2435
2436 case HCI_OP_LE_SET_EVENT_MASK:
2437 hci_cc_le_set_event_mask(hdev, skb);
2438 break;
2439
2440 case HCI_OP_USER_CONFIRM_REPLY:
2441 hci_cc_user_confirm_reply(hdev, skb);
2442 break;
2443
2444 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2445 hci_cc_user_confirm_neg_reply(hdev, skb);
2446 break;
2447
2448 case HCI_OP_USER_PASSKEY_REPLY:
2449 hci_cc_user_passkey_reply(hdev, skb);
2450 break;
2451
2452 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2453 hci_cc_user_passkey_neg_reply(hdev, skb);
2454 break;
2455
2456 case HCI_OP_LE_SET_SCAN_PARAM:
2457 hci_cc_le_set_scan_param(hdev, skb);
2458 break;
2459
2460 case HCI_OP_LE_SET_ADV_ENABLE:
2461 hci_cc_le_set_adv_enable(hdev, skb);
2462 break;
2463
2464 case HCI_OP_LE_SET_SCAN_ENABLE:
2465 hci_cc_le_set_scan_enable(hdev, skb);
2466 break;
2467
2468 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2469 hci_cc_le_read_white_list_size(hdev, skb);
2470 break;
2471
2472 case HCI_OP_LE_LTK_REPLY:
2473 hci_cc_le_ltk_reply(hdev, skb);
2474 break;
2475
2476 case HCI_OP_LE_LTK_NEG_REPLY:
2477 hci_cc_le_ltk_neg_reply(hdev, skb);
2478 break;
2479
2480 case HCI_OP_LE_READ_SUPPORTED_STATES:
2481 hci_cc_le_read_supported_states(hdev, skb);
2482 break;
2483
2484 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2485 hci_cc_write_le_host_supported(hdev, skb);
2486 break;
2487
2488 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2489 hci_cc_write_remote_amp_assoc(hdev, skb);
2490 break;
2491
2492 default:
2493 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2494 break;
2495 }
2496
2497 if (ev->opcode != HCI_OP_NOP)
2498 del_timer(&hdev->cmd_timer);
2499
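 /* A non-zero ncmd means the controller can accept more commands; restart the command queue unless a reset is pending */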
2500 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2501 atomic_set(&hdev->cmd_cnt, 1);
2502 if (!skb_queue_empty(&hdev->cmd_q))
2503 queue_work(hdev->workqueue, &hdev->cmd_work);
2504 }
2505}
2506
2507static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2508{
2509 struct hci_ev_cmd_status *ev = (void *) skb->data;
2510 __u16 opcode;
2511
2512 skb_pull(skb, sizeof(*ev));
2513
2514 opcode = __le16_to_cpu(ev->opcode);
2515
2516 switch (opcode) {
2517 case HCI_OP_INQUIRY:
2518 hci_cs_inquiry(hdev, ev->status);
2519 break;
2520
2521 case HCI_OP_CREATE_CONN:
2522 hci_cs_create_conn(hdev, ev->status);
2523 break;
2524
2525 case HCI_OP_ADD_SCO:
2526 hci_cs_add_sco(hdev, ev->status);
2527 break;
2528
2529 case HCI_OP_AUTH_REQUESTED:
2530 hci_cs_auth_requested(hdev, ev->status);
2531 break;
2532
2533 case HCI_OP_SET_CONN_ENCRYPT:
2534 hci_cs_set_conn_encrypt(hdev, ev->status);
2535 break;
2536
2537 case HCI_OP_REMOTE_NAME_REQ:
2538 hci_cs_remote_name_req(hdev, ev->status);
2539 break;
2540
2541 case HCI_OP_READ_REMOTE_FEATURES:
2542 hci_cs_read_remote_features(hdev, ev->status);
2543 break;
2544
2545 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2546 hci_cs_read_remote_ext_features(hdev, ev->status);
2547 break;
2548
2549 case HCI_OP_SETUP_SYNC_CONN:
2550 hci_cs_setup_sync_conn(hdev, ev->status);
2551 break;
2552
2553 case HCI_OP_SNIFF_MODE:
2554 hci_cs_sniff_mode(hdev, ev->status);
2555 break;
2556
2557 case HCI_OP_EXIT_SNIFF_MODE:
2558 hci_cs_exit_sniff_mode(hdev, ev->status);
2559 break;
2560
2561 case HCI_OP_DISCONNECT:
2562 hci_cs_disconnect(hdev, ev->status);
2563 break;
2564
2565 case HCI_OP_LE_CREATE_CONN:
2566 hci_cs_le_create_conn(hdev, ev->status);
2567 break;
2568
2569 case HCI_OP_LE_START_ENC:
2570 hci_cs_le_start_enc(hdev, ev->status);
2571 break;
2572
2573 case HCI_OP_CREATE_PHY_LINK:
2574 hci_cs_create_phylink(hdev, ev->status);
2575 break;
2576
2577 case HCI_OP_ACCEPT_PHY_LINK:
2578 hci_cs_accept_phylink(hdev, ev->status);
2579 break;
2580
2581 case HCI_OP_CREATE_LOGICAL_LINK:
2582 hci_cs_create_logical_link(hdev, ev->status);
2583 break;
2584
2585 default:
2586 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2587 break;
2588 }
2589
2590 if (ev->opcode != HCI_OP_NOP)
2591 del_timer(&hdev->cmd_timer);
2592
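 /* Same command flow control handling as for Command Complete */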
2593 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2594 atomic_set(&hdev->cmd_cnt, 1);
2595 if (!skb_queue_empty(&hdev->cmd_q))
2596 queue_work(hdev->workqueue, &hdev->cmd_work);
2597 }
2598}
2599
2600static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2601{
2602 struct hci_ev_role_change *ev = (void *) skb->data;
2603 struct hci_conn *conn;
2604
2605 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2606
2607 hci_dev_lock(hdev);
2608
2609 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2610 if (conn) {
2611 if (!ev->status) {
2612 if (ev->role)
2613 conn->link_mode &= ~HCI_LM_MASTER;
2614 else
2615 conn->link_mode |= HCI_LM_MASTER;
2616 }
2617
2618 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2619
2620 hci_role_switch_cfm(conn, ev->status, ev->role);
2621 }
2622
2623 hci_dev_unlock(hdev);
2624}
2625
2626static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2627{
2628 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2629 int i;
2630
2631 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2632 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2633 return;
2634 }
2635
2636 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2637 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2638 BT_DBG("%s bad parameters", hdev->name);
2639 return;
2640 }
2641
2642 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2643
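 /* Credit the completed packets back to the per-link-type flow control counters */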
2644 for (i = 0; i < ev->num_hndl; i++) {
2645 struct hci_comp_pkts_info *info = &ev->handles[i];
2646 struct hci_conn *conn;
2647 __u16 handle, count;
2648
2649 handle = __le16_to_cpu(info->handle);
2650 count = __le16_to_cpu(info->count);
2651
2652 conn = hci_conn_hash_lookup_handle(hdev, handle);
2653 if (!conn)
2654 continue;
2655
2656 conn->sent -= count;
2657
2658 switch (conn->type) {
2659 case ACL_LINK:
2660 hdev->acl_cnt += count;
2661 if (hdev->acl_cnt > hdev->acl_pkts)
2662 hdev->acl_cnt = hdev->acl_pkts;
2663 break;
2664
2665 case LE_LINK:
2666 if (hdev->le_pkts) {
2667 hdev->le_cnt += count;
2668 if (hdev->le_cnt > hdev->le_pkts)
2669 hdev->le_cnt = hdev->le_pkts;
2670 } else {
2671 hdev->acl_cnt += count;
2672 if (hdev->acl_cnt > hdev->acl_pkts)
2673 hdev->acl_cnt = hdev->acl_pkts;
2674 }
2675 break;
2676
2677 case SCO_LINK:
2678 hdev->sco_cnt += count;
2679 if (hdev->sco_cnt > hdev->sco_pkts)
2680 hdev->sco_cnt = hdev->sco_pkts;
2681 break;
2682
2683 default:
2684 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2685 break;
2686 }
2687 }
2688
2689 queue_work(hdev->workqueue, &hdev->tx_work);
2690}
2691
2692static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2693 __u16 handle)
2694{
2695 struct hci_chan *chan;
2696
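 /* On AMP controllers the handle identifies a logical channel, so map it back to its owning connection */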
2697 switch (hdev->dev_type) {
2698 case HCI_BREDR:
2699 return hci_conn_hash_lookup_handle(hdev, handle);
2700 case HCI_AMP:
2701 chan = hci_chan_lookup_handle(hdev, handle);
2702 if (chan)
2703 return chan->conn;
2704 break;
2705 default:
2706 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2707 break;
2708 }
2709
2710 return NULL;
2711}
2712
2713static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2714{
2715 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2716 int i;
2717
2718 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2719 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2720 return;
2721 }
2722
2723 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2724 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2725 BT_DBG("%s bad parameters", hdev->name);
2726 return;
2727 }
2728
2729 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2730 ev->num_hndl);
2731
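 /* Credit the completed blocks back to the shared block pool */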
2732 for (i = 0; i < ev->num_hndl; i++) {
2733 struct hci_comp_blocks_info *info = &ev->handles[i];
2734 struct hci_conn *conn = NULL;
2735 __u16 handle, block_count;
2736
2737 handle = __le16_to_cpu(info->handle);
2738 block_count = __le16_to_cpu(info->blocks);
2739
2740 conn = __hci_conn_lookup_handle(hdev, handle);
2741 if (!conn)
2742 continue;
2743
2744 conn->sent -= block_count;
2745
2746 switch (conn->type) {
2747 case ACL_LINK:
2748 case AMP_LINK:
2749 hdev->block_cnt += block_count;
2750 if (hdev->block_cnt > hdev->num_blocks)
2751 hdev->block_cnt = hdev->num_blocks;
2752 break;
2753
2754 default:
2755 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2756 break;
2757 }
2758 }
2759
2760 queue_work(hdev->workqueue, &hdev->tx_work);
2761}
2762
2763static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2764{
2765 struct hci_ev_mode_change *ev = (void *) skb->data;
2766 struct hci_conn *conn;
2767
2768 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2769
2770 hci_dev_lock(hdev);
2771
2772 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2773 if (conn) {
2774 conn->mode = ev->mode;
2775 conn->interval = __le16_to_cpu(ev->interval);
2776
2777 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2778 &conn->flags)) {
2779 if (conn->mode == HCI_CM_ACTIVE)
2780 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2781 else
2782 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2783 }
2784
2785 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2786 hci_sco_setup(conn, ev->status);
2787 }
2788
2789 hci_dev_unlock(hdev);
2790}
2791
2792static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2793{
2794 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2795 struct hci_conn *conn;
2796
2797 BT_DBG("%s", hdev->name);
2798
2799 hci_dev_lock(hdev);
2800
2801 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2802 if (!conn)
2803 goto unlock;
2804
2805 if (conn->state == BT_CONNECTED) {
2806 hci_conn_hold(conn);
2807 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2808 hci_conn_put(conn);
2809 }
2810
2811 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2812 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2813 sizeof(ev->bdaddr), &ev->bdaddr);
2814 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2815 u8 secure;
2816
2817 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2818 secure = 1;
2819 else
2820 secure = 0;
2821
2822 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2823 }
2824
2825unlock:
2826 hci_dev_unlock(hdev);
2827}
2828
2829static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2830{
2831 struct hci_ev_link_key_req *ev = (void *) skb->data;
2832 struct hci_cp_link_key_reply cp;
2833 struct hci_conn *conn;
2834 struct link_key *key;
2835
2836 BT_DBG("%s", hdev->name);
2837
2838 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2839 return;
2840
2841 hci_dev_lock(hdev);
2842
2843 key = hci_find_link_key(hdev, &ev->bdaddr);
2844 if (!key) {
2845 BT_DBG("%s link key not found for %pMR", hdev->name,
2846 &ev->bdaddr);
2847 goto not_found;
2848 }
2849
2850 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2851 &ev->bdaddr);
2852
2853 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2854 key->type == HCI_LK_DEBUG_COMBINATION) {
2855 BT_DBG("%s ignoring debug key", hdev->name);
2856 goto not_found;
2857 }
2858
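 /* If there is a connection, make sure the stored key is strong enough for its pending security level */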
2859 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2860 if (conn) {
2861 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2862 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2863 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2864 goto not_found;
2865 }
2866
2867 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2868 conn->pending_sec_level == BT_SECURITY_HIGH) {
2869 BT_DBG("%s ignoring key unauthenticated for high security",
2870 hdev->name);
2871 goto not_found;
2872 }
2873
2874 conn->key_type = key->type;
2875 conn->pin_length = key->pin_len;
2876 }
2877
2878 bacpy(&cp.bdaddr, &ev->bdaddr);
2879 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2880
2881 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2882
2883 hci_dev_unlock(hdev);
2884
2885 return;
2886
2887not_found:
2888 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr);
2889 hci_dev_unlock(hdev);
2890}
2891
2892static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2893{
2894 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2895 struct hci_conn *conn;
2896 u8 pin_len = 0;
2897
2898 BT_DBG("%s", hdev->name);
2899
2900 hci_dev_lock(hdev);
2901
2902 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2903 if (conn) {
2904 hci_conn_hold(conn);
2905 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2906 pin_len = conn->pin_length;
2907
2908 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2909 conn->key_type = ev->key_type;
2910
2911 hci_conn_put(conn);
2912 }
2913
2914 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2915 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2916 ev->key_type, pin_len);
2917
2918 hci_dev_unlock(hdev);
2919}
2920
2921static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2922{
2923 struct hci_ev_clock_offset *ev = (void *) skb->data;
2924 struct hci_conn *conn;
2925
2926 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2927
2928 hci_dev_lock(hdev);
2929
2930 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2931 if (conn && !ev->status) {
2932 struct inquiry_entry *ie;
2933
2934 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2935 if (ie) {
2936 ie->data.clock_offset = ev->clock_offset;
2937 ie->timestamp = jiffies;
2938 }
2939 }
2940
2941 hci_dev_unlock(hdev);
2942}
2943
2944static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2945{
2946 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2947 struct hci_conn *conn;
2948
2949 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2950
2951 hci_dev_lock(hdev);
2952
2953 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2954 if (conn && !ev->status)
2955 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2956
2957 hci_dev_unlock(hdev);
2958}
2959
2960static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2961{
2962 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2963 struct inquiry_entry *ie;
2964
2965 BT_DBG("%s", hdev->name);
2966
2967 hci_dev_lock(hdev);
2968
2969 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2970 if (ie) {
2971 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2972 ie->timestamp = jiffies;
2973 }
2974
2975 hci_dev_unlock(hdev);
2976}
2977
2978static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2979 struct sk_buff *skb)
2980{
2981 struct inquiry_data data;
2982 int num_rsp = *((__u8 *) skb->data);
2983 bool name_known, ssp;
2984
2985 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2986
2987 if (!num_rsp)
2988 return;
2989
2990 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2991 return;
2992
2993 hci_dev_lock(hdev);
2994
2995 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2996 struct inquiry_info_with_rssi_and_pscan_mode *info;
2997 info = (void *) (skb->data + 1);
2998
2999 for (; num_rsp; num_rsp--, info++) {
3000 bacpy(&data.bdaddr, &info->bdaddr);
3001 data.pscan_rep_mode = info->pscan_rep_mode;
3002 data.pscan_period_mode = info->pscan_period_mode;
3003 data.pscan_mode = info->pscan_mode;
3004 memcpy(data.dev_class, info->dev_class, 3);
3005 data.clock_offset = info->clock_offset;
3006 data.rssi = info->rssi;
3007 data.ssp_mode = 0x00;
3008
3009 name_known = hci_inquiry_cache_update(hdev, &data,
3010 false, &ssp);
3011 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3012 info->dev_class, info->rssi,
3013 !name_known, ssp, NULL, 0);
3014 }
3015 } else {
3016 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3017
3018 for (; num_rsp; num_rsp--, info++) {
3019 bacpy(&data.bdaddr, &info->bdaddr);
3020 data.pscan_rep_mode = info->pscan_rep_mode;
3021 data.pscan_period_mode = info->pscan_period_mode;
3022 data.pscan_mode = 0x00;
3023 memcpy(data.dev_class, info->dev_class, 3);
3024 data.clock_offset = info->clock_offset;
3025 data.rssi = info->rssi;
3026 data.ssp_mode = 0x00;
3027 name_known = hci_inquiry_cache_update(hdev, &data,
3028 false, &ssp);
3029 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3030 info->dev_class, info->rssi,
3031 !name_known, ssp, NULL, 0);
3032 }
3033 }
3034
3035 hci_dev_unlock(hdev);
3036}
3037
3038static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3039 struct sk_buff *skb)
3040{
3041 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3042 struct hci_conn *conn;
3043
3044 BT_DBG("%s", hdev->name);
3045
3046 hci_dev_lock(hdev);
3047
3048 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3049 if (!conn)
3050 goto unlock;
3051
3052 if (!ev->status && ev->page == 0x01) {
3053 struct inquiry_entry *ie;
3054
3055 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3056 if (ie)
3057 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3058
3059 if (ev->features[0] & LMP_HOST_SSP)
3060 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3061 }
3062
3063 if (conn->state != BT_CONFIG)
3064 goto unlock;
3065
3066 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3067 struct hci_cp_remote_name_req cp;
3068 memset(&cp, 0, sizeof(cp));
3069 bacpy(&cp.bdaddr, &conn->dst);
3070 cp.pscan_rep_mode = 0x02;
3071 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3072 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3073 mgmt_device_connected(hdev, &conn->dst, conn->type,
3074 conn->dst_type, 0, NULL, 0,
3075 conn->dev_class);
3076
3077 if (!hci_outgoing_auth_needed(hdev, conn)) {
3078 conn->state = BT_CONNECTED;
3079 hci_proto_connect_cfm(conn, ev->status);
3080 hci_conn_put(conn);
3081 }
3082
3083unlock:
3084 hci_dev_unlock(hdev);
3085}
3086
3087static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3088 struct sk_buff *skb)
3089{
3090 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3091 struct hci_conn *conn;
3092
3093 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3094
3095 hci_dev_lock(hdev);
3096
3097 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3098 if (!conn) {
3099 if (ev->link_type == ESCO_LINK)
3100 goto unlock;
3101
3102 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3103 if (!conn)
3104 goto unlock;
3105
3106 conn->type = SCO_LINK;
3107 }
3108
3109 switch (ev->status) {
3110 case 0x00:
3111 conn->handle = __le16_to_cpu(ev->handle);
3112 conn->state = BT_CONNECTED;
3113
3114 hci_conn_hold_device(conn);
3115 hci_conn_add_sysfs(conn);
3116 break;
3117
3118 case 0x11: /* Unsupported Feature or Parameter Value */
3119 case 0x1c: /* SCO interval rejected */
3120 case 0x1a: /* Unsupported Remote Feature */
3121 case 0x1f: /* Unspecified error */
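 /* Fall back to plain SCO packet types and retry once before giving up */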
3122 if (conn->out && conn->attempt < 2) {
3123 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3124 (hdev->esco_type & EDR_ESCO_MASK);
3125 hci_setup_sync(conn, conn->link->handle);
3126 goto unlock;
3127 }
3128 /* fall through */
3129
3130 default:
3131 conn->state = BT_CLOSED;
3132 break;
3133 }
3134
3135 hci_proto_connect_cfm(conn, ev->status);
3136 if (ev->status)
3137 hci_conn_del(conn);
3138
3139unlock:
3140 hci_dev_unlock(hdev);
3141}
3142
3143static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
3144{
3145 BT_DBG("%s", hdev->name);
3146}
3147
3148static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
3149{
3150 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
3151
3152 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3153}
3154
3155static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3156 struct sk_buff *skb)
3157{
3158 struct inquiry_data data;
3159 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3160 int num_rsp = *((__u8 *) skb->data);
3161 size_t eir_len;
3162
3163 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3164
3165 if (!num_rsp)
3166 return;
3167
3168 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3169 return;
3170
3171 hci_dev_lock(hdev);
3172
3173 for (; num_rsp; num_rsp--, info++) {
3174 bool name_known, ssp;
3175
3176 bacpy(&data.bdaddr, &info->bdaddr);
3177 data.pscan_rep_mode = info->pscan_rep_mode;
3178 data.pscan_period_mode = info->pscan_period_mode;
3179 data.pscan_mode = 0x00;
3180 memcpy(data.dev_class, info->dev_class, 3);
3181 data.clock_offset = info->clock_offset;
3182 data.rssi = info->rssi;
3183 data.ssp_mode = 0x01;
3184
3185 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3186 name_known = eir_has_data_type(info->data,
3187 sizeof(info->data),
3188 EIR_NAME_COMPLETE);
3189 else
3190 name_known = true;
3191
3192 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3193 &ssp);
3194 eir_len = eir_get_length(info->data, sizeof(info->data));
3195 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3196 info->dev_class, info->rssi, !name_known,
3197 ssp, info->data, eir_len);
3198 }
3199
3200 hci_dev_unlock(hdev);
3201}
3202
3203static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3204 struct sk_buff *skb)
3205{
3206 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3207 struct hci_conn *conn;
3208
3209 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3210 __le16_to_cpu(ev->handle));
3211
3212 hci_dev_lock(hdev);
3213
3214 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3215 if (!conn)
3216 goto unlock;
3217
3218 if (!ev->status)
3219 conn->sec_level = conn->pending_sec_level;
3220
3221 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3222
3223 if (ev->status && conn->state == BT_CONNECTED) {
3224 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3225 hci_conn_put(conn);
3226 goto unlock;
3227 }
3228
3229 if (conn->state == BT_CONFIG) {
3230 if (!ev->status)
3231 conn->state = BT_CONNECTED;
3232
3233 hci_proto_connect_cfm(conn, ev->status);
3234 hci_conn_put(conn);
3235 } else {
3236 hci_auth_cfm(conn, ev->status);
3237
3238 hci_conn_hold(conn);
3239 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3240 hci_conn_put(conn);
3241 }
3242
3243unlock:
3244 hci_dev_unlock(hdev);
3245}
3246
3247static u8 hci_get_auth_req(struct hci_conn *conn)
3248{
3249 /* If the remote requests dedicated bonding, follow that lead */
3250 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3251 /* If both remote and local IO capabilities allow MITM
3252 * protection, require it; otherwise don't */
3253 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3254 return 0x02;
3255 else
3256 return 0x03;
3257 }
3258
3259 /* If the remote requests no-bonding, follow that lead */
3260 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3261 return conn->remote_auth | (conn->auth_type & 0x01);
3262
3263 return conn->auth_type;
3264}
3265
3266static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3267{
3268 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3269 struct hci_conn *conn;
3270
3271 BT_DBG("%s", hdev->name);
3272
3273 hci_dev_lock(hdev);
3274
3275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3276 if (!conn)
3277 goto unlock;
3278
3279 hci_conn_hold(conn);
3280
3281 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3282 goto unlock;
3283
3284 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3285 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3286 struct hci_cp_io_capability_reply cp;
3287
3288 bacpy(&cp.bdaddr, &ev->bdaddr);
3289 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3290 * since KeyboardDisplay is not supported by the BR/EDR spec. */
3291 cp.capability = (conn->io_capability == 0x04) ?
3292 0x01 : conn->io_capability;
3293 conn->auth_type = hci_get_auth_req(conn);
3294 cp.authentication = conn->auth_type;
3295
3296 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3297 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3298 cp.oob_data = 0x01;
3299 else
3300 cp.oob_data = 0x00;
3301
3302 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3303 sizeof(cp), &cp);
3304 } else {
3305 struct hci_cp_io_capability_neg_reply cp;
3306
3307 bacpy(&cp.bdaddr, &ev->bdaddr);
3308 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3309
3310 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3311 sizeof(cp), &cp);
3312 }
3313
3314unlock:
3315 hci_dev_unlock(hdev);
3316}
3317
3318static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3319{
3320 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3321 struct hci_conn *conn;
3322
3323 BT_DBG("%s", hdev->name);
3324
3325 hci_dev_lock(hdev);
3326
3327 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3328 if (!conn)
3329 goto unlock;
3330
3331 conn->remote_cap = ev->capability;
3332 conn->remote_auth = ev->authentication;
3333 if (ev->oob_data)
3334 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3335
3336unlock:
3337 hci_dev_unlock(hdev);
3338}
3339
3340static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3341 struct sk_buff *skb)
3342{
3343 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3344 int loc_mitm, rem_mitm, confirm_hint = 0;
3345 struct hci_conn *conn;
3346
3347 BT_DBG("%s", hdev->name);
3348
3349 hci_dev_lock(hdev);
3350
3351 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3352 goto unlock;
3353
3354 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3355 if (!conn)
3356 goto unlock;
3357
3358 loc_mitm = (conn->auth_type & 0x01);
3359 rem_mitm = (conn->remote_auth & 0x01);
3360
3361 /* If we require MITM but the remote device can't provide it
3362 * (it has NoInputNoOutput), reject the confirmation request.
3363 * The only exception is when we are the dedicated bonding
3364 * initiator (connect_cfm_cb set), since then the MITM bit is
3365 * always set. */
3366 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3367 BT_DBG("Rejecting request: remote device can't provide MITM");
3368 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3369 sizeof(ev->bdaddr), &ev->bdaddr);
3370 goto unlock;
3371 }
3372
3373 /* If neither side requires MITM protection, auto-accept */
3374 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3375 (!rem_mitm || conn->io_capability == 0x03)) {
3376
3377 /* If we are not the initiator, request authorization to
3378 * proceed from user space (mgmt_user_confirm with
3379 * confirm_hint set to 1). */
3380 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3381 BT_DBG("Confirming auto-accept as acceptor");
3382 confirm_hint = 1;
3383 goto confirm;
3384 }
3385
3386 BT_DBG("Auto-accept of user confirmation with %ums delay",
3387 hdev->auto_accept_delay);
3388
3389 if (hdev->auto_accept_delay > 0) {
3390 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3391 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3392 goto unlock;
3393 }
3394
3395 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3396 sizeof(ev->bdaddr), &ev->bdaddr);
3397 goto unlock;
3398 }
3399
3400confirm:
3401 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3402 confirm_hint);
3403
3404unlock:
3405 hci_dev_unlock(hdev);
3406}
3407
3408static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3409 struct sk_buff *skb)
3410{
3411 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3412
3413 BT_DBG("%s", hdev->name);
3414
3415 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3416 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3417}
3418
3419static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3420 struct sk_buff *skb)
3421{
3422 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3423 struct hci_conn *conn;
3424
3425 BT_DBG("%s", hdev->name);
3426
3427 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3428 if (!conn)
3429 return;
3430
3431 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3432 conn->passkey_entered = 0;
3433
3434 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3435 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3436 conn->dst_type, conn->passkey_notify,
3437 conn->passkey_entered);
3438}
3439
3440static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3441{
3442 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3443 struct hci_conn *conn;
3444
3445 BT_DBG("%s", hdev->name);
3446
3447 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3448 if (!conn)
3449 return;
3450
3451 switch (ev->type) {
3452 case HCI_KEYPRESS_STARTED:
3453 conn->passkey_entered = 0;
3454 return;
3455
3456 case HCI_KEYPRESS_ENTERED:
3457 conn->passkey_entered++;
3458 break;
3459
3460 case HCI_KEYPRESS_ERASED:
3461 conn->passkey_entered--;
3462 break;
3463
3464 case HCI_KEYPRESS_CLEARED:
3465 conn->passkey_entered = 0;
3466 break;
3467
3468 case HCI_KEYPRESS_COMPLETED:
3469 return;
3470 }
3471
3472 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3473 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3474 conn->dst_type, conn->passkey_notify,
3475 conn->passkey_entered);
3476}
3477
3478static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3479 struct sk_buff *skb)
3480{
3481 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3482 struct hci_conn *conn;
3483
3484 BT_DBG("%s", hdev->name);
3485
3486 hci_dev_lock(hdev);
3487
3488 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3489 if (!conn)
3490 goto unlock;
3491
3492 /* To avoid sending duplicate auth_failed events to user space,
3493 * check the HCI_CONN_AUTH_PEND flag, which is set when we
3494 * initiated the authentication. A traditional auth_complete
3495 * event is always produced for the initiator and is also mapped
3496 * to the mgmt_auth_failed event. */
3497 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3498 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3499 ev->status);
3500
3501 hci_conn_put(conn);
3502
3503unlock:
3504 hci_dev_unlock(hdev);
3505}
3506
3507static void hci_remote_host_features_evt(struct hci_dev *hdev,
3508 struct sk_buff *skb)
3509{
3510 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3511 struct inquiry_entry *ie;
3512
3513 BT_DBG("%s", hdev->name);
3514
3515 hci_dev_lock(hdev);
3516
3517 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3518 if (ie)
3519 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3520
3521 hci_dev_unlock(hdev);
3522}
3523
3524static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3525 struct sk_buff *skb)
3526{
3527 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3528 struct oob_data *data;
3529
3530 BT_DBG("%s", hdev->name);
3531
3532 hci_dev_lock(hdev);
3533
3534 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3535 goto unlock;
3536
3537 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3538 if (data) {
3539 struct hci_cp_remote_oob_data_reply cp;
3540
3541 bacpy(&cp.bdaddr, &ev->bdaddr);
3542 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3543 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3544
3545 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3546 &cp);
3547 } else {
3548 struct hci_cp_remote_oob_data_neg_reply cp;
3549
3550 bacpy(&cp.bdaddr, &ev->bdaddr);
3551 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3552 &cp);
3553 }
3554
3555unlock:
3556 hci_dev_unlock(hdev);
3557}
3558
3559static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3560 struct sk_buff *skb)
3561{
3562 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3563 struct hci_conn *hcon, *bredr_hcon;
3564
3565 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3566 ev->status);
3567
3568 hci_dev_lock(hdev);
3569
3570 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3571 if (!hcon) {
3572 hci_dev_unlock(hdev);
3573 return;
3574 }
3575
3576 if (ev->status) {
3577 hci_conn_del(hcon);
3578 hci_dev_unlock(hdev);
3579 return;
3580 }
3581
3582 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3583
3584 hcon->state = BT_CONNECTED;
3585 bacpy(&hcon->dst, &bredr_hcon->dst);
3586
3587 hci_conn_hold(hcon);
3588 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3589 hci_conn_put(hcon);
3590
3591 hci_conn_hold_device(hcon);
3592 hci_conn_add_sysfs(hcon);
3593
3594 amp_physical_cfm(bredr_hcon, hcon);
3595
3596 hci_dev_unlock(hdev);
3597}
3598
3599static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3600{
3601 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3602 struct hci_conn *hcon;
3603 struct hci_chan *hchan;
3604 struct amp_mgr *mgr;
3605
3606 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3607 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3608 ev->status);
3609
3610 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3611 if (!hcon)
3612 return;
3613
3614 /* Create AMP hchan */
3615 hchan = hci_chan_create(hcon);
3616 if (!hchan)
3617 return;
3618
3619 hchan->handle = le16_to_cpu(ev->handle);
3620
3621 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3622
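 /* Hand the new AMP channel over to the L2CAP channel that was waiting for this logical link */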
3623 mgr = hcon->amp_mgr;
3624 if (mgr && mgr->bredr_chan) {
3625 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3626
3627 l2cap_chan_lock(bredr_chan);
3628
3629 bredr_chan->conn->mtu = hdev->block_mtu;
3630 l2cap_logical_cfm(bredr_chan, hchan, 0);
3631 hci_conn_hold(hcon);
3632
3633 l2cap_chan_unlock(bredr_chan);
3634 }
3635}
3636
3637static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3638 struct sk_buff *skb)
3639{
3640 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3641 struct hci_chan *hchan;
3642
3643 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3644 le16_to_cpu(ev->handle), ev->status);
3645
3646 if (ev->status)
3647 return;
3648
3649 hci_dev_lock(hdev);
3650
3651 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3652 if (!hchan)
3653 goto unlock;
3654
3655 amp_destroy_logical_link(hchan, ev->reason);
3656
3657unlock:
3658 hci_dev_unlock(hdev);
3659}
3660
3661static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3662 struct sk_buff *skb)
3663{
3664 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3665 struct hci_conn *hcon;
3666
3667 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3668
3669 if (ev->status)
3670 return;
3671
3672 hci_dev_lock(hdev);
3673
3674 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3675 if (hcon) {
3676 hcon->state = BT_CLOSED;
3677 hci_conn_del(hcon);
3678 }
3679
3680 hci_dev_unlock(hdev);
3681}
3682
3683static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3684{
3685 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3686 struct hci_conn *conn;
3687
3688 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3689
3690 hci_dev_lock(hdev);
3691
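 /* An outgoing LE connection is already tracked in BT_CONNECT state; anything else gets a fresh conn object here */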
3692 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3693 if (!conn) {
3694 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3695 if (!conn) {
3696 BT_ERR("No memory for new connection");
3697 goto unlock;
3698 }
3699
3700 conn->dst_type = ev->bdaddr_type;
3701
3702 if (ev->role == LE_CONN_ROLE_MASTER) {
3703 conn->out = true;
3704 conn->link_mode |= HCI_LM_MASTER;
3705 }
3706 }
3707
3708 if (ev->status) {
3709 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3710 conn->dst_type, ev->status);
3711 hci_proto_connect_cfm(conn, ev->status);
3712 conn->state = BT_CLOSED;
3713 hci_conn_del(conn);
3714 goto unlock;
3715 }
3716
3717 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3718 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3719 conn->dst_type, 0, NULL, 0, NULL);
3720
3721 conn->sec_level = BT_SECURITY_LOW;
3722 conn->handle = __le16_to_cpu(ev->handle);
3723 conn->state = BT_CONNECTED;
3724
3725 hci_conn_hold_device(conn);
3726 hci_conn_add_sysfs(conn);
3727
3728 hci_proto_connect_cfm(conn, ev->status);
3729
3730unlock:
3731 hci_dev_unlock(hdev);
3732}
3733
3734static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3735{
3736 u8 num_reports = skb->data[0];
3737 void *ptr = &skb->data[1];
3738 s8 rssi;
3739
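 /* Each advertising report is immediately followed by a single RSSI byte */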
3740 while (num_reports--) {
3741 struct hci_ev_le_advertising_info *ev = ptr;
3742
3743 rssi = ev->data[ev->length];
3744 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3745 NULL, rssi, 0, 1, ev->data, ev->length);
3746
3747 ptr += sizeof(*ev) + ev->length + 1;
3748 }
3749}
3750
3751static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3752{
3753 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3754 struct hci_cp_le_ltk_reply cp;
3755 struct hci_cp_le_ltk_neg_reply neg;
3756 struct hci_conn *conn;
3757 struct smp_ltk *ltk;
3758
3759 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3760
3761 hci_dev_lock(hdev);
3762
3763 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3764 if (conn == NULL)
3765 goto not_found;
3766
3767 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3768 if (ltk == NULL)
3769 goto not_found;
3770
3771 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3772 cp.handle = cpu_to_le16(conn->handle);
3773
3774 if (ltk->authenticated)
3775 conn->sec_level = BT_SECURITY_HIGH;
3776
3777 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3778
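 /* STKs are only good for a single use, so drop them once the reply has been sent */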
3779 if (ltk->type & HCI_SMP_STK) {
3780 list_del(&ltk->list);
3781 kfree(ltk);
3782 }
3783
3784 hci_dev_unlock(hdev);
3785
3786 return;
3787
3788not_found:
3789 neg.handle = ev->handle;
3790 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3791 hci_dev_unlock(hdev);
3792}
3793
3794static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3795{
3796 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3797
3798 skb_pull(skb, sizeof(*le_ev));
3799
3800 switch (le_ev->subevent) {
3801 case HCI_EV_LE_CONN_COMPLETE:
3802 hci_le_conn_complete_evt(hdev, skb);
3803 break;
3804
3805 case HCI_EV_LE_ADVERTISING_REPORT:
3806 hci_le_adv_report_evt(hdev, skb);
3807 break;
3808
3809 case HCI_EV_LE_LTK_REQ:
3810 hci_le_ltk_request_evt(hdev, skb);
3811 break;
3812
3813 default:
3814 break;
3815 }
3816}
3817
3818static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3819{
3820 struct hci_ev_channel_selected *ev = (void *) skb->data;
3821 struct hci_conn *hcon;
3822
3823 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3824
3825 skb_pull(skb, sizeof(*ev));
3826
3827 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3828 if (!hcon)
3829 return;
3830
3831 amp_read_loc_assoc_final_data(hdev, hcon);
3832}
3833
3834void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3835{
3836 struct hci_event_hdr *hdr = (void *) skb->data;
3837 __u8 event = hdr->evt;
3838
3839 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3840
3841 switch (event) {
3842 case HCI_EV_INQUIRY_COMPLETE:
3843 hci_inquiry_complete_evt(hdev, skb);
3844 break;
3845
3846 case HCI_EV_INQUIRY_RESULT:
3847 hci_inquiry_result_evt(hdev, skb);
3848 break;
3849
3850 case HCI_EV_CONN_COMPLETE:
3851 hci_conn_complete_evt(hdev, skb);
3852 break;
3853
3854 case HCI_EV_CONN_REQUEST:
3855 hci_conn_request_evt(hdev, skb);
3856 break;
3857
3858 case HCI_EV_DISCONN_COMPLETE:
3859 hci_disconn_complete_evt(hdev, skb);
3860 break;
3861
3862 case HCI_EV_AUTH_COMPLETE:
3863 hci_auth_complete_evt(hdev, skb);
3864 break;
3865
3866 case HCI_EV_REMOTE_NAME:
3867 hci_remote_name_evt(hdev, skb);
3868 break;
3869
3870 case HCI_EV_ENCRYPT_CHANGE:
3871 hci_encrypt_change_evt(hdev, skb);
3872 break;
3873
3874 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3875 hci_change_link_key_complete_evt(hdev, skb);
3876 break;
3877
3878 case HCI_EV_REMOTE_FEATURES:
3879 hci_remote_features_evt(hdev, skb);
3880 break;
3881
3882 case HCI_EV_REMOTE_VERSION:
3883 hci_remote_version_evt(hdev, skb);
3884 break;
3885
3886 case HCI_EV_QOS_SETUP_COMPLETE:
3887 hci_qos_setup_complete_evt(hdev, skb);
3888 break;
3889
3890 case HCI_EV_CMD_COMPLETE:
3891 hci_cmd_complete_evt(hdev, skb);
3892 break;
3893
3894 case HCI_EV_CMD_STATUS:
3895 hci_cmd_status_evt(hdev, skb);
3896 break;
3897
3898 case HCI_EV_ROLE_CHANGE:
3899 hci_role_change_evt(hdev, skb);
3900 break;
3901
3902 case HCI_EV_NUM_COMP_PKTS:
3903 hci_num_comp_pkts_evt(hdev, skb);
3904 break;
3905
3906 case HCI_EV_MODE_CHANGE:
3907 hci_mode_change_evt(hdev, skb);
3908 break;
3909
3910 case HCI_EV_PIN_CODE_REQ:
3911 hci_pin_code_request_evt(hdev, skb);
3912 break;
3913
3914 case HCI_EV_LINK_KEY_REQ:
3915 hci_link_key_request_evt(hdev, skb);
3916 break;
3917
3918 case HCI_EV_LINK_KEY_NOTIFY:
3919 hci_link_key_notify_evt(hdev, skb);
3920 break;
3921
3922 case HCI_EV_CLOCK_OFFSET:
3923 hci_clock_offset_evt(hdev, skb);
3924 break;
3925
3926 case HCI_EV_PKT_TYPE_CHANGE:
3927 hci_pkt_type_change_evt(hdev, skb);
3928 break;
3929
3930 case HCI_EV_PSCAN_REP_MODE:
3931 hci_pscan_rep_mode_evt(hdev, skb);
3932 break;
3933
3934 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3935 hci_inquiry_result_with_rssi_evt(hdev, skb);
3936 break;
3937
3938 case HCI_EV_REMOTE_EXT_FEATURES:
3939 hci_remote_ext_features_evt(hdev, skb);
3940 break;
3941
3942 case HCI_EV_SYNC_CONN_COMPLETE:
3943 hci_sync_conn_complete_evt(hdev, skb);
3944 break;
3945
3946 case HCI_EV_SYNC_CONN_CHANGED:
3947 hci_sync_conn_changed_evt(hdev, skb);
3948 break;
3949
3950 case HCI_EV_SNIFF_SUBRATE:
3951 hci_sniff_subrate_evt(hdev, skb);
3952 break;
3953
3954 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3955 hci_extended_inquiry_result_evt(hdev, skb);
3956 break;
3957
3958 case HCI_EV_KEY_REFRESH_COMPLETE:
3959 hci_key_refresh_complete_evt(hdev, skb);
3960 break;
3961
3962 case HCI_EV_IO_CAPA_REQUEST:
3963 hci_io_capa_request_evt(hdev, skb);
3964 break;
3965
3966 case HCI_EV_IO_CAPA_REPLY:
3967 hci_io_capa_reply_evt(hdev, skb);
3968 break;
3969
3970 case HCI_EV_USER_CONFIRM_REQUEST:
3971 hci_user_confirm_request_evt(hdev, skb);
3972 break;
3973
3974 case HCI_EV_USER_PASSKEY_REQUEST:
3975 hci_user_passkey_request_evt(hdev, skb);
3976 break;
3977
3978 case HCI_EV_USER_PASSKEY_NOTIFY:
3979 hci_user_passkey_notify_evt(hdev, skb);
3980 break;
3981
3982 case HCI_EV_KEYPRESS_NOTIFY:
3983 hci_keypress_notify_evt(hdev, skb);
3984 break;
3985
3986 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3987 hci_simple_pair_complete_evt(hdev, skb);
3988 break;
3989
3990 case HCI_EV_REMOTE_HOST_FEATURES:
3991 hci_remote_host_features_evt(hdev, skb);
3992 break;
3993
3994 case HCI_EV_LE_META:
3995 hci_le_meta_evt(hdev, skb);
3996 break;
3997
3998 case HCI_EV_CHANNEL_SELECTED:
3999 hci_chan_selected_evt(hdev, skb);
4000 break;
4001
4002 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4003 hci_remote_oob_data_request_evt(hdev, skb);
4004 break;
4005
4006 case HCI_EV_PHY_LINK_COMPLETE:
4007 hci_phy_link_complete_evt(hdev, skb);
4008 break;
4009
4010 case HCI_EV_LOGICAL_LINK_COMPLETE:
4011 hci_loglink_complete_evt(hdev, skb);
4012 break;
4013
4014 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4015 hci_disconn_loglink_complete_evt(hdev, skb);
4016 break;
4017
4018 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4019 hci_disconn_phylink_complete_evt(hdev, skb);
4020 break;
4021
4022 case HCI_EV_NUM_COMP_BLOCKS:
4023 hci_num_comp_blocks_evt(hdev, skb);
4024 break;
4025
4026 default:
4027 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4028 break;
4029 }
4030
4031 kfree_skb(skb);
4032 hdev->stat.evt_rx++;
4033}