net/bluetooth/hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
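/* Command Complete for HCI_Inquiry_Cancel: a failure is reported to
 * the management core; on success the HCI_INQUIRY flag is cleared,
 * discovery is marked as stopped and any connection attempts that
 * were deferred during the inquiry are re-checked */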
37 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
38 {
39 __u8 status = *((__u8 *) skb->data);
40
41 BT_DBG("%s status 0x%2.2x", hdev->name, status);
42
43 if (status) {
44 hci_dev_lock(hdev);
45 mgmt_stop_discovery_failed(hdev, status);
46 hci_dev_unlock(hdev);
47 return;
48 }
49
50 clear_bit(HCI_INQUIRY, &hdev->flags);
51
52 hci_dev_lock(hdev);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54 hci_dev_unlock(hdev);
55
56 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
57
58 hci_conn_check_pending(hdev);
59 }
60
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85 }
86
87 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 struct sk_buff *skb)
89 {
90 BT_DBG("%s", hdev->name);
91 }
92
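/* Command Complete for HCI_Role_Discovery: update the master bit in
 * the connection's link mode (role 0x00 means master) */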
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157 }
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
161 {
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166 if (rp->status)
167 return;
168
169 hdev->link_policy = __le16_to_cpu(rp->policy);
170 }
171
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
174 {
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
177
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
183
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
186
187 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
188 }
189
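/* Command Complete for HCI_Reset: clear the HCI_RESET flag, drop all
 * non-persistent device flags, reset the discovery state, invalidate
 * the cached TX power values and clear the advertising data */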
190 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
191 {
192 __u8 status = *((__u8 *) skb->data);
193
194 BT_DBG("%s status 0x%2.2x", hdev->name, status);
195
196 clear_bit(HCI_RESET, &hdev->flags);
197
198 hci_req_complete(hdev, HCI_OP_RESET, status);
199
200 /* Reset all non-persistent flags */
201 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
202 BIT(HCI_PERIODIC_INQ));
203
204 hdev->discovery.state = DISCOVERY_STOPPED;
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
207
208 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 hdev->adv_data_len = 0;
210 }
211
212 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
213 {
214 __u8 status = *((__u8 *) skb->data);
215 void *sent;
216
217 BT_DBG("%s status 0x%2.2x", hdev->name, status);
218
219 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
220 if (!sent)
221 return;
222
223 hci_dev_lock(hdev);
224
225 if (test_bit(HCI_MGMT, &hdev->dev_flags))
226 mgmt_set_local_name_complete(hdev, sent, status);
227 else if (!status)
228 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
229
230 hci_dev_unlock(hdev);
231
232 if (!status && !test_bit(HCI_INIT, &hdev->flags))
233 hci_update_ad(hdev);
234
235 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
236 }
237
238 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
239 {
240 struct hci_rp_read_local_name *rp = (void *) skb->data;
241
242 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
243
244 if (rp->status)
245 return;
246
247 if (test_bit(HCI_SETUP, &hdev->dev_flags))
248 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
249 }
250
251 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
252 {
253 __u8 status = *((__u8 *) skb->data);
254 void *sent;
255
256 BT_DBG("%s status 0x%2.2x", hdev->name, status);
257
258 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
259 if (!sent)
260 return;
261
262 if (!status) {
263 __u8 param = *((__u8 *) sent);
264
265 if (param == AUTH_ENABLED)
266 set_bit(HCI_AUTH, &hdev->flags);
267 else
268 clear_bit(HCI_AUTH, &hdev->flags);
269 }
270
271 if (test_bit(HCI_MGMT, &hdev->dev_flags))
272 mgmt_auth_enable_complete(hdev, status);
273
274 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
275 }
276
277 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
278 {
279 __u8 status = *((__u8 *) skb->data);
280 void *sent;
281
282 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 if (!sent)
286 return;
287
288 if (!status) {
289 __u8 param = *((__u8 *) sent);
290
291 if (param)
292 set_bit(HCI_ENCRYPT, &hdev->flags);
293 else
294 clear_bit(HCI_ENCRYPT, &hdev->flags);
295 }
296
297 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
298 }
299
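/* Command Complete for HCI_Write_Scan_Enable: mirror the requested
 * scan setting into the HCI_ISCAN and HCI_PSCAN flags, notify the
 * management core about discoverable/connectable transitions and
 * (re)arm the discoverable timeout when one is configured */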
300 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
301 {
302 __u8 param, status = *((__u8 *) skb->data);
303 int old_pscan, old_iscan;
304 void *sent;
305
306 BT_DBG("%s status 0x%2.2x", hdev->name, status);
307
308 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
309 if (!sent)
310 return;
311
312 param = *((__u8 *) sent);
313
314 hci_dev_lock(hdev);
315
316 if (status) {
317 mgmt_write_scan_failed(hdev, param, status);
318 hdev->discov_timeout = 0;
319 goto done;
320 }
321
322 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
323 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
324
325 if (param & SCAN_INQUIRY) {
326 set_bit(HCI_ISCAN, &hdev->flags);
327 if (!old_iscan)
328 mgmt_discoverable(hdev, 1);
329 if (hdev->discov_timeout > 0) {
330 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
331 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
332 to);
333 }
334 } else if (old_iscan)
335 mgmt_discoverable(hdev, 0);
336
337 if (param & SCAN_PAGE) {
338 set_bit(HCI_PSCAN, &hdev->flags);
339 if (!old_pscan)
340 mgmt_connectable(hdev, 1);
341 } else if (old_pscan)
342 mgmt_connectable(hdev, 0);
343
344 done:
345 hci_dev_unlock(hdev);
346 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
347 }
348
349 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
352
353 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
354
355 if (rp->status)
356 return;
357
358 memcpy(hdev->dev_class, rp->dev_class, 3);
359
360 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
361 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
362 }
363
364 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
365 {
366 __u8 status = *((__u8 *) skb->data);
367 void *sent;
368
369 BT_DBG("%s status 0x%2.2x", hdev->name, status);
370
371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
372 if (!sent)
373 return;
374
375 hci_dev_lock(hdev);
376
377 if (status == 0)
378 memcpy(hdev->dev_class, sent, 3);
379
380 if (test_bit(HCI_MGMT, &hdev->dev_flags))
381 mgmt_set_class_of_dev_complete(hdev, sent, status);
382
383 hci_dev_unlock(hdev);
384 }
385
386 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
387 {
388 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
389 __u16 setting;
390
391 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
392
393 if (rp->status)
394 return;
395
396 setting = __le16_to_cpu(rp->voice_setting);
397
398 if (hdev->voice_setting == setting)
399 return;
400
401 hdev->voice_setting = setting;
402
403 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
404
405 if (hdev->notify)
406 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407 }
408
409 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
410 struct sk_buff *skb)
411 {
412 __u8 status = *((__u8 *) skb->data);
413 __u16 setting;
414 void *sent;
415
416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
417
418 if (status)
419 return;
420
421 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
422 if (!sent)
423 return;
424
425 setting = get_unaligned_le16(sent);
426
427 if (hdev->voice_setting == setting)
428 return;
429
430 hdev->voice_setting = setting;
431
432 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
433
434 if (hdev->notify)
435 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
436 }
437
438 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
439 {
440 __u8 status = *((__u8 *) skb->data);
441
442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
443
444 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
445 }
446
447 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
448 {
449 __u8 status = *((__u8 *) skb->data);
450 struct hci_cp_write_ssp_mode *sent;
451
452 BT_DBG("%s status 0x%2.2x", hdev->name, status);
453
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
455 if (!sent)
456 return;
457
458 if (!status) {
459 if (sent->mode)
460 hdev->host_features[0] |= LMP_HOST_SSP;
461 else
462 hdev->host_features[0] &= ~LMP_HOST_SSP;
463 }
464
465 if (test_bit(HCI_MGMT, &hdev->dev_flags))
466 mgmt_ssp_enable_complete(hdev, sent->mode, status);
467 else if (!status) {
468 if (sent->mode)
469 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
470 else
471 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
472 }
473 }
474
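/* Pick the best inquiry result mode the controller can deliver:
 * 2 = extended inquiry result, 1 = inquiry result with RSSI,
 * 0 = standard. A few controllers, matched by manufacturer, hci_rev
 * and lmp_subver, are given mode 1 even though they do not advertise
 * the RSSI feature bit */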
475 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
476 {
477 if (lmp_ext_inq_capable(hdev))
478 return 2;
479
480 if (lmp_inq_rssi_capable(hdev))
481 return 1;
482
483 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
484 hdev->lmp_subver == 0x0757)
485 return 1;
486
487 if (hdev->manufacturer == 15) {
488 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
489 return 1;
490 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
491 return 1;
492 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
493 return 1;
494 }
495
496 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
497 hdev->lmp_subver == 0x1805)
498 return 1;
499
500 return 0;
501 }
502
503 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
504 {
505 u8 mode;
506
507 mode = hci_get_inquiry_mode(hdev);
508
509 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
510 }
511
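/* Build the HCI event mask from the controller's supported features
 * and send HCI_Set_Event_Mask, followed by HCI_LE_Set_Event_Mask on
 * LE capable controllers */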
512 static void hci_setup_event_mask(struct hci_dev *hdev)
513 {
514 /* The second byte is 0xff instead of 0x9f (two reserved bits
515 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
516 * command otherwise */
517 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
518
 519         /* CSR 1.1 dongles do not accept any bitfield so don't try to set
 520          * any event mask for pre-1.2 devices */
521 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
522 return;
523
524 if (lmp_bredr_capable(hdev)) {
525 events[4] |= 0x01; /* Flow Specification Complete */
526 events[4] |= 0x02; /* Inquiry Result with RSSI */
527 events[4] |= 0x04; /* Read Remote Extended Features Complete */
528 events[5] |= 0x08; /* Synchronous Connection Complete */
529 events[5] |= 0x10; /* Synchronous Connection Changed */
530 }
531
532 if (lmp_inq_rssi_capable(hdev))
533 events[4] |= 0x02; /* Inquiry Result with RSSI */
534
535 if (lmp_sniffsubr_capable(hdev))
536 events[5] |= 0x20; /* Sniff Subrating */
537
538 if (lmp_pause_enc_capable(hdev))
539 events[5] |= 0x80; /* Encryption Key Refresh Complete */
540
541 if (lmp_ext_inq_capable(hdev))
542 events[5] |= 0x40; /* Extended Inquiry Result */
543
544 if (lmp_no_flush_capable(hdev))
545 events[7] |= 0x01; /* Enhanced Flush Complete */
546
547 if (lmp_lsto_capable(hdev))
548 events[6] |= 0x80; /* Link Supervision Timeout Changed */
549
550 if (lmp_ssp_capable(hdev)) {
551 events[6] |= 0x01; /* IO Capability Request */
552 events[6] |= 0x02; /* IO Capability Response */
553 events[6] |= 0x04; /* User Confirmation Request */
554 events[6] |= 0x08; /* User Passkey Request */
555 events[6] |= 0x10; /* Remote OOB Data Request */
556 events[6] |= 0x20; /* Simple Pairing Complete */
557 events[7] |= 0x04; /* User Passkey Notification */
558 events[7] |= 0x08; /* Keypress Notification */
559 events[7] |= 0x10; /* Remote Host Supported
560 * Features Notification */
561 }
562
563 if (lmp_le_capable(hdev))
564 events[7] |= 0x20; /* LE Meta-Event */
565
566 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
567
568 if (lmp_le_capable(hdev)) {
569 memset(events, 0, sizeof(events));
570 events[0] = 0x1f;
571 hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
572 sizeof(events), events);
573 }
574 }
575
576 static void bredr_setup(struct hci_dev *hdev)
577 {
578 struct hci_cp_delete_stored_link_key cp;
579 __le16 param;
580 __u8 flt_type;
581
582 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
583 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
584
585 /* Read Class of Device */
586 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
587
588 /* Read Local Name */
589 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
590
591 /* Read Voice Setting */
592 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
593
594 /* Clear Event Filters */
595 flt_type = HCI_FLT_CLEAR_ALL;
596 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
597
598 /* Connection accept timeout ~20 secs */
599 param = __constant_cpu_to_le16(0x7d00);
600 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
601
602 bacpy(&cp.bdaddr, BDADDR_ANY);
603 cp.delete_all = 1;
604 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
605 }
606
607 static void le_setup(struct hci_dev *hdev)
608 {
609 /* Read LE Buffer Size */
610 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
611
612 /* Read LE Advertising Channel TX Power */
613 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
614 }
615
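/* Controller initialization sequence, run from the Read Local
 * Version complete handler while HCI_INIT is set: read the BD
 * address, issue the BR/EDR and LE setup commands, program the event
 * mask and configure SSP, inquiry mode, inquiry TX power, extended
 * features and link-level authentication as supported */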
616 static void hci_setup(struct hci_dev *hdev)
617 {
618 if (hdev->dev_type != HCI_BREDR)
619 return;
620
621 /* Read BD Address */
622 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
623
624 if (lmp_bredr_capable(hdev))
625 bredr_setup(hdev);
626
627 if (lmp_le_capable(hdev))
628 le_setup(hdev);
629
630 hci_setup_event_mask(hdev);
631
632 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
633 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
634
635 if (lmp_ssp_capable(hdev)) {
636 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
637 u8 mode = 0x01;
638 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
639 sizeof(mode), &mode);
640 } else {
641 struct hci_cp_write_eir cp;
642
643 memset(hdev->eir, 0, sizeof(hdev->eir));
644 memset(&cp, 0, sizeof(cp));
645
646 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
647 }
648 }
649
650 if (lmp_inq_rssi_capable(hdev))
651 hci_setup_inquiry_mode(hdev);
652
653 if (lmp_inq_tx_pwr_capable(hdev))
654 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
655
656 if (lmp_ext_feat_capable(hdev)) {
657 struct hci_cp_read_local_ext_features cp;
658
659 cp.page = 0x01;
660 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
661 &cp);
662 }
663
664 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
665 u8 enable = 1;
666 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
667 &enable);
668 }
669 }
670
671 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
672 {
673 struct hci_rp_read_local_version *rp = (void *) skb->data;
674
675 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
676
677 if (rp->status)
678 goto done;
679
680 hdev->hci_ver = rp->hci_ver;
681 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
682 hdev->lmp_ver = rp->lmp_ver;
683 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
684 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
685
686 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
687 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
688
689 if (test_bit(HCI_INIT, &hdev->flags))
690 hci_setup(hdev);
691
692 done:
693 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
694 }
695
696 static void hci_setup_link_policy(struct hci_dev *hdev)
697 {
698 struct hci_cp_write_def_link_policy cp;
699 u16 link_policy = 0;
700
701 if (lmp_rswitch_capable(hdev))
702 link_policy |= HCI_LP_RSWITCH;
703 if (lmp_hold_capable(hdev))
704 link_policy |= HCI_LP_HOLD;
705 if (lmp_sniff_capable(hdev))
706 link_policy |= HCI_LP_SNIFF;
707 if (lmp_park_capable(hdev))
708 link_policy |= HCI_LP_PARK;
709
710 cp.policy = cpu_to_le16(link_policy);
711 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
712 }
713
714 static void hci_cc_read_local_commands(struct hci_dev *hdev,
715 struct sk_buff *skb)
716 {
717 struct hci_rp_read_local_commands *rp = (void *) skb->data;
718
719 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
720
721 if (rp->status)
722 goto done;
723
724 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
725
726 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
727 hci_setup_link_policy(hdev);
728
729 done:
730 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
731 }
732
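/* Command Complete for HCI_Read_Local_Supported_Features: cache the
 * LMP feature bits and derive the ACL packet types and (e)SCO link
 * types the controller supports */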
733 static void hci_cc_read_local_features(struct hci_dev *hdev,
734 struct sk_buff *skb)
735 {
736 struct hci_rp_read_local_features *rp = (void *) skb->data;
737
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
739
740 if (rp->status)
741 return;
742
743 memcpy(hdev->features, rp->features, 8);
744
745 /* Adjust default settings according to features
746 * supported by device. */
747
748 if (hdev->features[0] & LMP_3SLOT)
749 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
750
751 if (hdev->features[0] & LMP_5SLOT)
752 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
753
754 if (hdev->features[1] & LMP_HV2) {
755 hdev->pkt_type |= (HCI_HV2);
756 hdev->esco_type |= (ESCO_HV2);
757 }
758
759 if (hdev->features[1] & LMP_HV3) {
760 hdev->pkt_type |= (HCI_HV3);
761 hdev->esco_type |= (ESCO_HV3);
762 }
763
764 if (lmp_esco_capable(hdev))
765 hdev->esco_type |= (ESCO_EV3);
766
767 if (hdev->features[4] & LMP_EV4)
768 hdev->esco_type |= (ESCO_EV4);
769
770 if (hdev->features[4] & LMP_EV5)
771 hdev->esco_type |= (ESCO_EV5);
772
773 if (hdev->features[5] & LMP_EDR_ESCO_2M)
774 hdev->esco_type |= (ESCO_2EV3);
775
776 if (hdev->features[5] & LMP_EDR_ESCO_3M)
777 hdev->esco_type |= (ESCO_3EV3);
778
779 if (hdev->features[5] & LMP_EDR_3S_ESCO)
780 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
781
782 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
783 hdev->features[0], hdev->features[1],
784 hdev->features[2], hdev->features[3],
785 hdev->features[4], hdev->features[5],
786 hdev->features[6], hdev->features[7]);
787 }
788
789 static void hci_set_le_support(struct hci_dev *hdev)
790 {
791 struct hci_cp_write_le_host_supported cp;
792
793 memset(&cp, 0, sizeof(cp));
794
795 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
796 cp.le = 1;
797 cp.simul = lmp_le_br_capable(hdev);
798 }
799
800 if (cp.le != lmp_host_le_capable(hdev))
801 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
802 &cp);
803 }
804
805 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
806 struct sk_buff *skb)
807 {
808 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
809
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
811
812 if (rp->status)
813 goto done;
814
815 switch (rp->page) {
816 case 0:
817 memcpy(hdev->features, rp->features, 8);
818 break;
819 case 1:
820 memcpy(hdev->host_features, rp->features, 8);
821 break;
822 }
823
824 if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
825 hci_set_le_support(hdev);
826
827 done:
828 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
829 }
830
831 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
832 struct sk_buff *skb)
833 {
834 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
835
836 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
837
838 if (rp->status)
839 return;
840
841 hdev->flow_ctl_mode = rp->mode;
842
843 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
844 }
845
846 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
847 {
848 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
849
850 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
851
852 if (rp->status)
853 return;
854
855 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
856 hdev->sco_mtu = rp->sco_mtu;
857 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
858 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
859
860 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
861 hdev->sco_mtu = 64;
862 hdev->sco_pkts = 8;
863 }
864
865 hdev->acl_cnt = hdev->acl_pkts;
866 hdev->sco_cnt = hdev->sco_pkts;
867
868 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
869 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
870 }
871
872 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
873 {
874 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
875
876 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
877
878 if (!rp->status)
879 bacpy(&hdev->bdaddr, &rp->bdaddr);
880
881 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
882 }
883
884 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
885 struct sk_buff *skb)
886 {
887 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
888
889 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
890
891 if (rp->status)
892 return;
893
894 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
895 hdev->block_len = __le16_to_cpu(rp->block_len);
896 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
897
898 hdev->block_cnt = hdev->num_blocks;
899
900 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
901 hdev->block_cnt, hdev->block_len);
902
903 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
904 }
905
906 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 __u8 status = *((__u8 *) skb->data);
909
910 BT_DBG("%s status 0x%2.2x", hdev->name, status);
911
912 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
913 }
914
915 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
916 struct sk_buff *skb)
917 {
918 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
919
920 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
921
922 if (rp->status)
923 goto a2mp_rsp;
924
925 hdev->amp_status = rp->amp_status;
926 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
927 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
928 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
929 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
930 hdev->amp_type = rp->amp_type;
931 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
932 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
933 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
934 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
935
936 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
937
938 a2mp_rsp:
939 a2mp_send_getinfo_rsp(hdev);
940 }
941
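/* Command Complete for HCI_Read_Local_AMP_ASSOC: reassemble the AMP
 * assoc data fragment by fragment; as long as more data remains a
 * further read is issued, and only the final fragment (or an error)
 * triggers the A2MP responses */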
942 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
943 struct sk_buff *skb)
944 {
945 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
946 struct amp_assoc *assoc = &hdev->loc_assoc;
947 size_t rem_len, frag_len;
948
949 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
950
951 if (rp->status)
952 goto a2mp_rsp;
953
954 frag_len = skb->len - sizeof(*rp);
955 rem_len = __le16_to_cpu(rp->rem_len);
956
957 if (rem_len > frag_len) {
958 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
959
960 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
961 assoc->offset += frag_len;
962
963 /* Read other fragments */
964 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
965
966 return;
967 }
968
969 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
970 assoc->len = assoc->offset + rem_len;
971 assoc->offset = 0;
972
973 a2mp_rsp:
974 /* Send A2MP Rsp when all fragments are received */
975 a2mp_send_getampassoc_rsp(hdev, rp->status);
976 a2mp_send_create_phy_link_req(hdev, rp->status);
977 }
978
979 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
980 struct sk_buff *skb)
981 {
982 __u8 status = *((__u8 *) skb->data);
983
984 BT_DBG("%s status 0x%2.2x", hdev->name, status);
985
986 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
987 }
988
989 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
990 {
991 __u8 status = *((__u8 *) skb->data);
992
993 BT_DBG("%s status 0x%2.2x", hdev->name, status);
994
995 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
996 }
997
998 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
999 struct sk_buff *skb)
1000 {
1001 __u8 status = *((__u8 *) skb->data);
1002
1003 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1004
1005 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
1006 }
1007
1008 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1010 {
1011 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
1012
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1014
1015 if (!rp->status)
1016 hdev->inq_tx_power = rp->tx_power;
1017
1018 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
1019 }
1020
1021 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
1022 {
1023 __u8 status = *((__u8 *) skb->data);
1024
1025 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1026
1027 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
1028 }
1029
1030 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
1031 {
1032 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
1033 struct hci_cp_pin_code_reply *cp;
1034 struct hci_conn *conn;
1035
1036 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1037
1038 hci_dev_lock(hdev);
1039
1040 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1041 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1042
1043 if (rp->status)
1044 goto unlock;
1045
1046 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1047 if (!cp)
1048 goto unlock;
1049
1050 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1051 if (conn)
1052 conn->pin_length = cp->pin_len;
1053
1054 unlock:
1055 hci_dev_unlock(hdev);
1056 }
1057
1058 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1059 {
1060 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1061
1062 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1063
1064 hci_dev_lock(hdev);
1065
1066 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1067 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1068 rp->status);
1069
1070 hci_dev_unlock(hdev);
1071 }
1072
1073 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1074 struct sk_buff *skb)
1075 {
1076 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1077
1078 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1079
1080 if (rp->status)
1081 return;
1082
1083 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1084 hdev->le_pkts = rp->le_max_pkt;
1085
1086 hdev->le_cnt = hdev->le_pkts;
1087
1088 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1089
1090 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
1091 }
1092
1093 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1094 struct sk_buff *skb)
1095 {
1096 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1097
1098 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1099
1100 if (!rp->status) {
1101 hdev->adv_tx_power = rp->tx_power;
1102 if (!test_bit(HCI_INIT, &hdev->flags))
1103 hci_update_ad(hdev);
1104 }
1105
1106 hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
1107 }
1108
1109 static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
1110 {
1111 __u8 status = *((__u8 *) skb->data);
1112
1113 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1114
1115 hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
1116 }
1117
1118 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1119 {
1120 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1121
1122 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1123
1124 hci_dev_lock(hdev);
1125
1126 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1127 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1128 rp->status);
1129
1130 hci_dev_unlock(hdev);
1131 }
1132
1133 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1134 struct sk_buff *skb)
1135 {
1136 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1137
1138 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1139
1140 hci_dev_lock(hdev);
1141
1142 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1143 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1144 ACL_LINK, 0, rp->status);
1145
1146 hci_dev_unlock(hdev);
1147 }
1148
1149 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1150 {
1151 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1152
1153 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1154
1155 hci_dev_lock(hdev);
1156
1157 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1158 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1159 0, rp->status);
1160
1161 hci_dev_unlock(hdev);
1162 }
1163
1164 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1165 struct sk_buff *skb)
1166 {
1167 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1168
1169 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1170
1171 hci_dev_lock(hdev);
1172
1173 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1174 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1175 ACL_LINK, 0, rp->status);
1176
1177 hci_dev_unlock(hdev);
1178 }
1179
1180 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1181 struct sk_buff *skb)
1182 {
1183 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1184
1185 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1186
1187 hci_dev_lock(hdev);
1188 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1189 rp->randomizer, rp->status);
1190 hci_dev_unlock(hdev);
1191 }
1192
1193 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1194 {
1195 __u8 *sent, status = *((__u8 *) skb->data);
1196
1197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1198
1199 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1200 if (!sent)
1201 return;
1202
1203 hci_dev_lock(hdev);
1204
1205 if (!status) {
1206 if (*sent)
1207 set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1208 else
1209 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1210 }
1211
1212 hci_dev_unlock(hdev);
1213
1214 if (!test_bit(HCI_INIT, &hdev->flags))
1215 hci_update_ad(hdev);
1216
1217 hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status);
1218 }
1219
1220 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1221 {
1222 __u8 status = *((__u8 *) skb->data);
1223
1224 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1225
1226 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1227
1228 if (status) {
1229 hci_dev_lock(hdev);
1230 mgmt_start_discovery_failed(hdev, status);
1231 hci_dev_unlock(hdev);
1232 return;
1233 }
1234 }
1235
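/* Command Complete for HCI_LE_Set_Scan_Enable: on enable, set the
 * HCI_LE_SCAN flag and move discovery to FINDING; on disable, clear
 * the flag and either continue an interleaved discovery or mark
 * discovery as stopped. Failures are reported to the management
 * core */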
1236 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1237 struct sk_buff *skb)
1238 {
1239 struct hci_cp_le_set_scan_enable *cp;
1240 __u8 status = *((__u8 *) skb->data);
1241
1242 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1243
1244 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1245 if (!cp)
1246 return;
1247
1248 switch (cp->enable) {
1249 case LE_SCANNING_ENABLED:
1250 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1251
1252 if (status) {
1253 hci_dev_lock(hdev);
1254 mgmt_start_discovery_failed(hdev, status);
1255 hci_dev_unlock(hdev);
1256 return;
1257 }
1258
1259 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1260
1261 hci_dev_lock(hdev);
1262 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1263 hci_dev_unlock(hdev);
1264 break;
1265
1266 case LE_SCANNING_DISABLED:
1267 if (status) {
1268 hci_dev_lock(hdev);
1269 mgmt_stop_discovery_failed(hdev, status);
1270 hci_dev_unlock(hdev);
1271 return;
1272 }
1273
1274 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1275
1276 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1277 hdev->discovery.state == DISCOVERY_FINDING) {
1278 mgmt_interleaved_discovery(hdev);
1279 } else {
1280 hci_dev_lock(hdev);
1281 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1282 hci_dev_unlock(hdev);
1283 }
1284
1285 break;
1286
1287 default:
1288 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1289 break;
1290 }
1291 }
1292
1293 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1294 {
1295 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1296
1297 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1298
1299 if (rp->status)
1300 return;
1301
1302 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1303 }
1304
1305 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1306 {
1307 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1308
1309 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1310
1311 if (rp->status)
1312 return;
1313
1314 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1315 }
1316
1317 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1318 struct sk_buff *skb)
1319 {
1320 struct hci_cp_write_le_host_supported *sent;
1321 __u8 status = *((__u8 *) skb->data);
1322
1323 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1324
1325 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1326 if (!sent)
1327 return;
1328
1329 if (!status) {
1330 if (sent->le)
1331 hdev->host_features[0] |= LMP_HOST_LE;
1332 else
1333 hdev->host_features[0] &= ~LMP_HOST_LE;
1334
1335 if (sent->simul)
1336 hdev->host_features[0] |= LMP_HOST_LE_BREDR;
1337 else
1338 hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
1339 }
1340
1341 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1342 !test_bit(HCI_INIT, &hdev->flags))
1343 mgmt_le_enable_complete(hdev, sent->le, status);
1344
1345 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1346 }
1347
1348 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1349 struct sk_buff *skb)
1350 {
1351 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1352
1353 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1354 hdev->name, rp->status, rp->phy_handle);
1355
1356 if (rp->status)
1357 return;
1358
1359 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1360 }
1361
1362 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1363 {
1364 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1365
1366 if (status) {
1367 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1368 hci_conn_check_pending(hdev);
1369 hci_dev_lock(hdev);
1370 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1371 mgmt_start_discovery_failed(hdev, status);
1372 hci_dev_unlock(hdev);
1373 return;
1374 }
1375
1376 set_bit(HCI_INQUIRY, &hdev->flags);
1377
1378 hci_dev_lock(hdev);
1379 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1380 hci_dev_unlock(hdev);
1381 }
1382
1383 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1384 {
1385 struct hci_cp_create_conn *cp;
1386 struct hci_conn *conn;
1387
1388 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1389
1390 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1391 if (!cp)
1392 return;
1393
1394 hci_dev_lock(hdev);
1395
1396 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1397
1398 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1399
1400 if (status) {
1401 if (conn && conn->state == BT_CONNECT) {
1402 if (status != 0x0c || conn->attempt > 2) {
1403 conn->state = BT_CLOSED;
1404 hci_proto_connect_cfm(conn, status);
1405 hci_conn_del(conn);
1406 } else
1407 conn->state = BT_CONNECT2;
1408 }
1409 } else {
1410 if (!conn) {
1411 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1412 if (conn) {
1413 conn->out = true;
1414 conn->link_mode |= HCI_LM_MASTER;
1415 } else
1416 BT_ERR("No memory for new connection");
1417 }
1418 }
1419
1420 hci_dev_unlock(hdev);
1421 }
1422
1423 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1424 {
1425 struct hci_cp_add_sco *cp;
1426 struct hci_conn *acl, *sco;
1427 __u16 handle;
1428
1429 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1430
1431 if (!status)
1432 return;
1433
1434 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1435 if (!cp)
1436 return;
1437
1438 handle = __le16_to_cpu(cp->handle);
1439
1440 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1441
1442 hci_dev_lock(hdev);
1443
1444 acl = hci_conn_hash_lookup_handle(hdev, handle);
1445 if (acl) {
1446 sco = acl->link;
1447 if (sco) {
1448 sco->state = BT_CLOSED;
1449
1450 hci_proto_connect_cfm(sco, status);
1451 hci_conn_del(sco);
1452 }
1453 }
1454
1455 hci_dev_unlock(hdev);
1456 }
1457
1458 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1459 {
1460 struct hci_cp_auth_requested *cp;
1461 struct hci_conn *conn;
1462
1463 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1464
1465 if (!status)
1466 return;
1467
1468 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1469 if (!cp)
1470 return;
1471
1472 hci_dev_lock(hdev);
1473
1474 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1475 if (conn) {
1476 if (conn->state == BT_CONFIG) {
1477 hci_proto_connect_cfm(conn, status);
1478 hci_conn_put(conn);
1479 }
1480 }
1481
1482 hci_dev_unlock(hdev);
1483 }
1484
1485 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1486 {
1487 struct hci_cp_set_conn_encrypt *cp;
1488 struct hci_conn *conn;
1489
1490 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1491
1492 if (!status)
1493 return;
1494
1495 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1496 if (!cp)
1497 return;
1498
1499 hci_dev_lock(hdev);
1500
1501 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1502 if (conn) {
1503 if (conn->state == BT_CONFIG) {
1504 hci_proto_connect_cfm(conn, status);
1505 hci_conn_put(conn);
1506 }
1507 }
1508
1509 hci_dev_unlock(hdev);
1510 }
1511
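/* Decide whether authentication still has to be requested for an
 * outgoing connection that is in the BT_CONFIG state */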
1512 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1513 struct hci_conn *conn)
1514 {
1515 if (conn->state != BT_CONFIG || !conn->out)
1516 return 0;
1517
1518 if (conn->pending_sec_level == BT_SECURITY_SDP)
1519 return 0;
1520
1521 /* Only request authentication for SSP connections or non-SSP
1522 * devices with sec_level HIGH or if MITM protection is requested */
1523 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1524 conn->pending_sec_level != BT_SECURITY_HIGH)
1525 return 0;
1526
1527 return 1;
1528 }
1529
1530 static int hci_resolve_name(struct hci_dev *hdev,
1531 struct inquiry_entry *e)
1532 {
1533 struct hci_cp_remote_name_req cp;
1534
1535 memset(&cp, 0, sizeof(cp));
1536
1537 bacpy(&cp.bdaddr, &e->data.bdaddr);
1538 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1539 cp.pscan_mode = e->data.pscan_mode;
1540 cp.clock_offset = e->data.clock_offset;
1541
1542 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1543 }
1544
1545 static bool hci_resolve_next_name(struct hci_dev *hdev)
1546 {
1547 struct discovery_state *discov = &hdev->discovery;
1548 struct inquiry_entry *e;
1549
1550 if (list_empty(&discov->resolve))
1551 return false;
1552
1553 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1554 if (!e)
1555 return false;
1556
1557 if (hci_resolve_name(hdev, e) == 0) {
1558 e->name_state = NAME_PENDING;
1559 return true;
1560 }
1561
1562 return false;
1563 }
1564
1565 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1566 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1567 {
1568 struct discovery_state *discov = &hdev->discovery;
1569 struct inquiry_entry *e;
1570
1571 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1572 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1573 name_len, conn->dev_class);
1574
1575 if (discov->state == DISCOVERY_STOPPED)
1576 return;
1577
1578 if (discov->state == DISCOVERY_STOPPING)
1579 goto discov_complete;
1580
1581 if (discov->state != DISCOVERY_RESOLVING)
1582 return;
1583
1584 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1585         /* If the device was not found in the list of devices whose names
1586          * are pending, there is no need to continue resolving the next name,
1587          * as it will be done upon receiving another Remote Name Request
1588          * Complete event */
1589 if (!e)
1590 return;
1591
1592 list_del(&e->list);
1593 if (name) {
1594 e->name_state = NAME_KNOWN;
1595 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1596 e->data.rssi, name, name_len);
1597 } else {
1598 e->name_state = NAME_NOT_KNOWN;
1599 }
1600
1601 if (hci_resolve_next_name(hdev))
1602 return;
1603
1604 discov_complete:
1605 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1606 }
1607
1608 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1609 {
1610 struct hci_cp_remote_name_req *cp;
1611 struct hci_conn *conn;
1612
1613 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1614
1615         /* If successful, wait for the name req complete event before
1616          * checking for the need to do authentication */
1617 if (!status)
1618 return;
1619
1620 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1621 if (!cp)
1622 return;
1623
1624 hci_dev_lock(hdev);
1625
1626 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1627
1628 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1629 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1630
1631 if (!conn)
1632 goto unlock;
1633
1634 if (!hci_outgoing_auth_needed(hdev, conn))
1635 goto unlock;
1636
1637 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1638 struct hci_cp_auth_requested cp;
1639 cp.handle = __cpu_to_le16(conn->handle);
1640 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1641 }
1642
1643 unlock:
1644 hci_dev_unlock(hdev);
1645 }
1646
1647 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1648 {
1649 struct hci_cp_read_remote_features *cp;
1650 struct hci_conn *conn;
1651
1652 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1653
1654 if (!status)
1655 return;
1656
1657 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1658 if (!cp)
1659 return;
1660
1661 hci_dev_lock(hdev);
1662
1663 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1664 if (conn) {
1665 if (conn->state == BT_CONFIG) {
1666 hci_proto_connect_cfm(conn, status);
1667 hci_conn_put(conn);
1668 }
1669 }
1670
1671 hci_dev_unlock(hdev);
1672 }
1673
1674 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1675 {
1676 struct hci_cp_read_remote_ext_features *cp;
1677 struct hci_conn *conn;
1678
1679 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1680
1681 if (!status)
1682 return;
1683
1684 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1685 if (!cp)
1686 return;
1687
1688 hci_dev_lock(hdev);
1689
1690 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1691 if (conn) {
1692 if (conn->state == BT_CONFIG) {
1693 hci_proto_connect_cfm(conn, status);
1694 hci_conn_put(conn);
1695 }
1696 }
1697
1698 hci_dev_unlock(hdev);
1699 }
1700
1701 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1702 {
1703 struct hci_cp_setup_sync_conn *cp;
1704 struct hci_conn *acl, *sco;
1705 __u16 handle;
1706
1707 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1708
1709 if (!status)
1710 return;
1711
1712 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1713 if (!cp)
1714 return;
1715
1716 handle = __le16_to_cpu(cp->handle);
1717
1718 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1719
1720 hci_dev_lock(hdev);
1721
1722 acl = hci_conn_hash_lookup_handle(hdev, handle);
1723 if (acl) {
1724 sco = acl->link;
1725 if (sco) {
1726 sco->state = BT_CLOSED;
1727
1728 hci_proto_connect_cfm(sco, status);
1729 hci_conn_del(sco);
1730 }
1731 }
1732
1733 hci_dev_unlock(hdev);
1734 }
1735
1736 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1737 {
1738 struct hci_cp_sniff_mode *cp;
1739 struct hci_conn *conn;
1740
1741 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1742
1743 if (!status)
1744 return;
1745
1746 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1747 if (!cp)
1748 return;
1749
1750 hci_dev_lock(hdev);
1751
1752 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1753 if (conn) {
1754 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1755
1756 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1757 hci_sco_setup(conn, status);
1758 }
1759
1760 hci_dev_unlock(hdev);
1761 }
1762
1763 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1764 {
1765 struct hci_cp_exit_sniff_mode *cp;
1766 struct hci_conn *conn;
1767
1768 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1769
1770 if (!status)
1771 return;
1772
1773 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1774 if (!cp)
1775 return;
1776
1777 hci_dev_lock(hdev);
1778
1779 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1780 if (conn) {
1781 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1782
1783 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1784 hci_sco_setup(conn, status);
1785 }
1786
1787 hci_dev_unlock(hdev);
1788 }
1789
1790 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1791 {
1792 struct hci_cp_disconnect *cp;
1793 struct hci_conn *conn;
1794
1795 if (!status)
1796 return;
1797
1798 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1799 if (!cp)
1800 return;
1801
1802 hci_dev_lock(hdev);
1803
1804 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1805 if (conn)
1806 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1807 conn->dst_type, status);
1808
1809 hci_dev_unlock(hdev);
1810 }
1811
1812 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1813 {
1814 struct hci_conn *conn;
1815
1816 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1817
1818 if (status) {
1819 hci_dev_lock(hdev);
1820
1821 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1822 if (!conn) {
1823 hci_dev_unlock(hdev);
1824 return;
1825 }
1826
1827 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1828
1829 conn->state = BT_CLOSED;
1830 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1831 conn->dst_type, status);
1832 hci_proto_connect_cfm(conn, status);
1833 hci_conn_del(conn);
1834
1835 hci_dev_unlock(hdev);
1836 }
1837 }
1838
1839 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1840 {
1841 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1842 }
1843
1844 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1845 {
1846 struct hci_cp_create_phy_link *cp;
1847
1848 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1849
1850 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1851 if (!cp)
1852 return;
1853
1854 hci_dev_lock(hdev);
1855
1856 if (status) {
1857 struct hci_conn *hcon;
1858
1859 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1860 if (hcon)
1861 hci_conn_del(hcon);
1862 } else {
1863 amp_write_remote_assoc(hdev, cp->phy_handle);
1864 }
1865
1866 hci_dev_unlock(hdev);
1867 }
1868
1869 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1870 {
1871 struct hci_cp_accept_phy_link *cp;
1872
1873 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1874
1875 if (status)
1876 return;
1877
1878 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1879 if (!cp)
1880 return;
1881
1882 amp_write_remote_assoc(hdev, cp->phy_handle);
1883 }
1884
1885 static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status)
1886 {
1887 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1888 }
1889
1890 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1891 {
1892 __u8 status = *((__u8 *) skb->data);
1893 struct discovery_state *discov = &hdev->discovery;
1894 struct inquiry_entry *e;
1895
1896 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1897
1898 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1899
1900 hci_conn_check_pending(hdev);
1901
1902 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1903 return;
1904
1905 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1906 return;
1907
1908 hci_dev_lock(hdev);
1909
1910 if (discov->state != DISCOVERY_FINDING)
1911 goto unlock;
1912
1913 if (list_empty(&discov->resolve)) {
1914 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1915 goto unlock;
1916 }
1917
1918 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1919 if (e && hci_resolve_name(hdev, e) == 0) {
1920 e->name_state = NAME_PENDING;
1921 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1922 } else {
1923 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1924 }
1925
1926 unlock:
1927 hci_dev_unlock(hdev);
1928 }
1929
1930 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1931 {
1932 struct inquiry_data data;
1933 struct inquiry_info *info = (void *) (skb->data + 1);
1934 int num_rsp = *((__u8 *) skb->data);
1935
1936 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1937
1938 if (!num_rsp)
1939 return;
1940
1941 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1942 return;
1943
1944 hci_dev_lock(hdev);
1945
1946 for (; num_rsp; num_rsp--, info++) {
1947 bool name_known, ssp;
1948
1949 bacpy(&data.bdaddr, &info->bdaddr);
1950 data.pscan_rep_mode = info->pscan_rep_mode;
1951 data.pscan_period_mode = info->pscan_period_mode;
1952 data.pscan_mode = info->pscan_mode;
1953 memcpy(data.dev_class, info->dev_class, 3);
1954 data.clock_offset = info->clock_offset;
1955 data.rssi = 0x00;
1956 data.ssp_mode = 0x00;
1957
1958 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1959 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1960 info->dev_class, 0, !name_known, ssp, NULL,
1961 0);
1962 }
1963
1964 hci_dev_unlock(hdev);
1965 }
1966
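/* Connection Complete event: attach the handle to the matching
 * connection, falling back from eSCO to SCO when needed; successful
 * ACL links get their disconnect timeout chosen and their remote
 * features read, while failed connections are reported upstream and
 * torn down */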
1967 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1968 {
1969 struct hci_ev_conn_complete *ev = (void *) skb->data;
1970 struct hci_conn *conn;
1971
1972 BT_DBG("%s", hdev->name);
1973
1974 hci_dev_lock(hdev);
1975
1976 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1977 if (!conn) {
1978 if (ev->link_type != SCO_LINK)
1979 goto unlock;
1980
1981 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1982 if (!conn)
1983 goto unlock;
1984
1985 conn->type = SCO_LINK;
1986 }
1987
1988 if (!ev->status) {
1989 conn->handle = __le16_to_cpu(ev->handle);
1990
1991 if (conn->type == ACL_LINK) {
1992 conn->state = BT_CONFIG;
1993 hci_conn_hold(conn);
1994
1995 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1996 !hci_find_link_key(hdev, &ev->bdaddr))
1997 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1998 else
1999 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2000 } else
2001 conn->state = BT_CONNECTED;
2002
2003 hci_conn_hold_device(conn);
2004 hci_conn_add_sysfs(conn);
2005
2006 if (test_bit(HCI_AUTH, &hdev->flags))
2007 conn->link_mode |= HCI_LM_AUTH;
2008
2009 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2010 conn->link_mode |= HCI_LM_ENCRYPT;
2011
2012 /* Get remote features */
2013 if (conn->type == ACL_LINK) {
2014 struct hci_cp_read_remote_features cp;
2015 cp.handle = ev->handle;
2016 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2017 sizeof(cp), &cp);
2018 }
2019
2020 /* Set packet type for incoming connection */
2021 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2022 struct hci_cp_change_conn_ptype cp;
2023 cp.handle = ev->handle;
2024 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2025 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2026 &cp);
2027 }
2028 } else {
2029 conn->state = BT_CLOSED;
2030 if (conn->type == ACL_LINK)
2031 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
2032 conn->dst_type, ev->status);
2033 }
2034
2035 if (conn->type == ACL_LINK)
2036 hci_sco_setup(conn, ev->status);
2037
2038 if (ev->status) {
2039 hci_proto_connect_cfm(conn, ev->status);
2040 hci_conn_del(conn);
2041 } else if (ev->link_type != ACL_LINK)
2042 hci_proto_connect_cfm(conn, ev->status);
2043
2044 unlock:
2045 hci_dev_unlock(hdev);
2046
2047 hci_conn_check_pending(hdev);
2048 }
2049
2050 void hci_conn_accept(struct hci_conn *conn, int mask)
2051 {
2052 struct hci_dev *hdev = conn->hdev;
2053
2054 BT_DBG("conn %p", conn);
2055
2056 conn->state = BT_CONFIG;
2057
2058 if (!lmp_esco_capable(hdev)) {
2059 struct hci_cp_accept_conn_req cp;
2060
2061 bacpy(&cp.bdaddr, &conn->dst);
2062
2063 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2064 cp.role = 0x00; /* Become master */
2065 else
2066 cp.role = 0x01; /* Remain slave */
2067
2068 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2069         } else /* lmp_esco_capable(hdev) */ {
2070 struct hci_cp_accept_sync_conn_req cp;
2071
2072 bacpy(&cp.bdaddr, &conn->dst);
2073 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2074
2075 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
2076 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
2077 cp.max_latency = __constant_cpu_to_le16(0xffff);
2078 cp.content_format = cpu_to_le16(hdev->voice_setting);
2079 cp.retrans_effort = 0xff;
2080
2081 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
2082 sizeof(cp), &cp);
2083 }
2084 }
2085
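/* Connection Request event: accept the request when the link policy
 * allows it and the peer is not blacklisted, choosing between ACL
 * accept, synchronous accept and deferred setup; anything else is
 * rejected with HCI_ERROR_REJ_BAD_ADDR */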
2086 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2087 {
2088 struct hci_ev_conn_request *ev = (void *) skb->data;
2089 int mask = hdev->link_mode;
2090 __u8 flags = 0;
2091
2092 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2093 ev->link_type);
2094
2095 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2096 &flags);
2097
2098 if ((mask & HCI_LM_ACCEPT) &&
2099 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
2100 /* Connection accepted */
2101 struct inquiry_entry *ie;
2102 struct hci_conn *conn;
2103
2104 hci_dev_lock(hdev);
2105
2106 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2107 if (ie)
2108 memcpy(ie->data.dev_class, ev->dev_class, 3);
2109
2110 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2111 &ev->bdaddr);
2112 if (!conn) {
2113 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
2114 if (!conn) {
2115 BT_ERR("No memory for new connection");
2116 hci_dev_unlock(hdev);
2117 return;
2118 }
2119 }
2120
2121 memcpy(conn->dev_class, ev->dev_class, 3);
2122
2123 hci_dev_unlock(hdev);
2124
2125 if (ev->link_type == ACL_LINK ||
2126 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2127 struct hci_cp_accept_conn_req cp;
2128 conn->state = BT_CONNECT;
2129
2130 bacpy(&cp.bdaddr, &ev->bdaddr);
2131
2132 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2133 cp.role = 0x00; /* Become master */
2134 else
2135 cp.role = 0x01; /* Remain slave */
2136
2137 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
2138 &cp);
2139 } else if (!(flags & HCI_PROTO_DEFER)) {
2140 struct hci_cp_accept_sync_conn_req cp;
2141 conn->state = BT_CONNECT;
2142
2143 bacpy(&cp.bdaddr, &ev->bdaddr);
2144 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2145
2146 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
2147 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
2148 cp.max_latency = __constant_cpu_to_le16(0xffff);
2149 cp.content_format = cpu_to_le16(hdev->voice_setting);
2150 cp.retrans_effort = 0xff;
2151
2152 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
2153 sizeof(cp), &cp);
2154 } else {
2155 conn->state = BT_CONNECT2;
2156 hci_proto_connect_cfm(conn, 0);
2157 hci_conn_put(conn);
2158 }
2159 } else {
2160 /* Connection rejected */
2161 struct hci_cp_reject_conn_req cp;
2162
2163 bacpy(&cp.bdaddr, &ev->bdaddr);
2164 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2165 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2166 }
2167 }
2168
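/* Map an HCI disconnect error code to the mgmt disconnect reason
 * reported to user space.
 */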
2169 static u8 hci_to_mgmt_reason(u8 err)
2170 {
2171 switch (err) {
2172 case HCI_ERROR_CONNECTION_TIMEOUT:
2173 return MGMT_DEV_DISCONN_TIMEOUT;
2174 case HCI_ERROR_REMOTE_USER_TERM:
2175 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2176 case HCI_ERROR_REMOTE_POWER_OFF:
2177 return MGMT_DEV_DISCONN_REMOTE;
2178 case HCI_ERROR_LOCAL_HOST_TERM:
2179 return MGMT_DEV_DISCONN_LOCAL_HOST;
2180 default:
2181 return MGMT_DEV_DISCONN_UNKNOWN;
2182 }
2183 }
2184
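/* Disconnection Complete event: mark the connection closed, notify the
 * mgmt layer, drop a link key flagged for removal and tear the
 * connection down.
 */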
2185 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2186 {
2187 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2188 struct hci_conn *conn;
2189
2190 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2191
2192 hci_dev_lock(hdev);
2193
2194 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2195 if (!conn)
2196 goto unlock;
2197
2198 if (ev->status == 0)
2199 conn->state = BT_CLOSED;
2200
2201 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
2202 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
2203 if (ev->status) {
2204 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2205 conn->dst_type, ev->status);
2206 } else {
2207 u8 reason = hci_to_mgmt_reason(ev->reason);
2208
2209 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
2210 conn->dst_type, reason);
2211 }
2212 }
2213
2214 if (ev->status == 0) {
2215 if (conn->type == ACL_LINK && conn->flush_key)
2216 hci_remove_link_key(hdev, &conn->dst);
2217 hci_proto_disconn_cfm(conn, ev->reason);
2218 hci_conn_del(conn);
2219 }
2220
2221 unlock:
2222 hci_dev_unlock(hdev);
2223 }
2224
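/* Authentication Complete event: update the link mode and security
 * level on success, report failures to mgmt, and either continue with
 * encryption setup or complete the connection depending on its state.
 */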
2225 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2226 {
2227 struct hci_ev_auth_complete *ev = (void *) skb->data;
2228 struct hci_conn *conn;
2229
2230 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2231
2232 hci_dev_lock(hdev);
2233
2234 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2235 if (!conn)
2236 goto unlock;
2237
2238 if (!ev->status) {
2239 if (!hci_conn_ssp_enabled(conn) &&
2240 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2241 BT_INFO("re-auth of legacy device is not possible.");
2242 } else {
2243 conn->link_mode |= HCI_LM_AUTH;
2244 conn->sec_level = conn->pending_sec_level;
2245 }
2246 } else {
2247 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2248 ev->status);
2249 }
2250
2251 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2252 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2253
2254 if (conn->state == BT_CONFIG) {
2255 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2256 struct hci_cp_set_conn_encrypt cp;
2257 cp.handle = ev->handle;
2258 cp.encrypt = 0x01;
2259 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2260 &cp);
2261 } else {
2262 conn->state = BT_CONNECTED;
2263 hci_proto_connect_cfm(conn, ev->status);
2264 hci_conn_put(conn);
2265 }
2266 } else {
2267 hci_auth_cfm(conn, ev->status);
2268
2269 hci_conn_hold(conn);
2270 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2271 hci_conn_put(conn);
2272 }
2273
2274 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2275 if (!ev->status) {
2276 struct hci_cp_set_conn_encrypt cp;
2277 cp.handle = ev->handle;
2278 cp.encrypt = 0x01;
2279 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2280 &cp);
2281 } else {
2282 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2283 hci_encrypt_cfm(conn, ev->status, 0x00);
2284 }
2285 }
2286
2287 unlock:
2288 hci_dev_unlock(hdev);
2289 }
2290
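/* Remote Name Request Complete event: forward the resolved name to the
 * mgmt layer and, for outgoing connections that still need it, start
 * authentication.
 */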
2291 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2292 {
2293 struct hci_ev_remote_name *ev = (void *) skb->data;
2294 struct hci_conn *conn;
2295
2296 BT_DBG("%s", hdev->name);
2297
2298 hci_conn_check_pending(hdev);
2299
2300 hci_dev_lock(hdev);
2301
2302 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2303
2304 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2305 goto check_auth;
2306
2307 if (ev->status == 0)
2308 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2309 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2310 else
2311 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2312
2313 check_auth:
2314 if (!conn)
2315 goto unlock;
2316
2317 if (!hci_outgoing_auth_needed(hdev, conn))
2318 goto unlock;
2319
2320 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2321 struct hci_cp_auth_requested cp;
2322 cp.handle = __cpu_to_le16(conn->handle);
2323 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2324 }
2325
2326 unlock:
2327 hci_dev_unlock(hdev);
2328 }
2329
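/* Encryption Change event: update the link mode bits and notify the
 * upper layers; an encryption failure on an established link results in
 * a disconnect with authentication failure.
 */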
2330 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2331 {
2332 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2333 struct hci_conn *conn;
2334
2335 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2336
2337 hci_dev_lock(hdev);
2338
2339 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2340 if (conn) {
2341 if (!ev->status) {
2342 if (ev->encrypt) {
2343 /* Encryption implies authentication */
2344 conn->link_mode |= HCI_LM_AUTH;
2345 conn->link_mode |= HCI_LM_ENCRYPT;
2346 conn->sec_level = conn->pending_sec_level;
2347 } else
2348 conn->link_mode &= ~HCI_LM_ENCRYPT;
2349 }
2350
2351 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2352
2353 if (ev->status && conn->state == BT_CONNECTED) {
2354 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2355 hci_conn_put(conn);
2356 goto unlock;
2357 }
2358
2359 if (conn->state == BT_CONFIG) {
2360 if (!ev->status)
2361 conn->state = BT_CONNECTED;
2362
2363 hci_proto_connect_cfm(conn, ev->status);
2364 hci_conn_put(conn);
2365 } else
2366 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2367 }
2368
2369 unlock:
2370 hci_dev_unlock(hdev);
2371 }
2372
2373 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2374 struct sk_buff *skb)
2375 {
2376 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2377 struct hci_conn *conn;
2378
2379 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2380
2381 hci_dev_lock(hdev);
2382
2383 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2384 if (conn) {
2385 if (!ev->status)
2386 conn->link_mode |= HCI_LM_SECURE;
2387
2388 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2389
2390 hci_key_change_cfm(conn, ev->status);
2391 }
2392
2393 hci_dev_unlock(hdev);
2394 }
2395
2396 static void hci_remote_features_evt(struct hci_dev *hdev,
2397 struct sk_buff *skb)
2398 {
2399 struct hci_ev_remote_features *ev = (void *) skb->data;
2400 struct hci_conn *conn;
2401
2402 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2403
2404 hci_dev_lock(hdev);
2405
2406 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2407 if (!conn)
2408 goto unlock;
2409
2410 if (!ev->status)
2411 memcpy(conn->features, ev->features, 8);
2412
2413 if (conn->state != BT_CONFIG)
2414 goto unlock;
2415
2416 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2417 struct hci_cp_read_remote_ext_features cp;
2418 cp.handle = ev->handle;
2419 cp.page = 0x01;
2420 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2421 sizeof(cp), &cp);
2422 goto unlock;
2423 }
2424
2425 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2426 struct hci_cp_remote_name_req cp;
2427 memset(&cp, 0, sizeof(cp));
2428 bacpy(&cp.bdaddr, &conn->dst);
2429 cp.pscan_rep_mode = 0x02;
2430 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2431 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2432 mgmt_device_connected(hdev, &conn->dst, conn->type,
2433 conn->dst_type, 0, NULL, 0,
2434 conn->dev_class);
2435
2436 if (!hci_outgoing_auth_needed(hdev, conn)) {
2437 conn->state = BT_CONNECTED;
2438 hci_proto_connect_cfm(conn, ev->status);
2439 hci_conn_put(conn);
2440 }
2441
2442 unlock:
2443 hci_dev_unlock(hdev);
2444 }
2445
2446 static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2447 {
2448 BT_DBG("%s", hdev->name);
2449 }
2450
2451 static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2452 struct sk_buff *skb)
2453 {
2454 BT_DBG("%s", hdev->name);
2455 }
2456
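/* Command Complete event: dispatch to the per-opcode handlers above,
 * stop the command timer and, if the controller can accept more
 * commands, re-arm the command queue work.
 */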
2457 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2458 {
2459 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2460 __u16 opcode;
2461
2462 skb_pull(skb, sizeof(*ev));
2463
2464 opcode = __le16_to_cpu(ev->opcode);
2465
2466 switch (opcode) {
2467 case HCI_OP_INQUIRY_CANCEL:
2468 hci_cc_inquiry_cancel(hdev, skb);
2469 break;
2470
2471 case HCI_OP_PERIODIC_INQ:
2472 hci_cc_periodic_inq(hdev, skb);
2473 break;
2474
2475 case HCI_OP_EXIT_PERIODIC_INQ:
2476 hci_cc_exit_periodic_inq(hdev, skb);
2477 break;
2478
2479 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2480 hci_cc_remote_name_req_cancel(hdev, skb);
2481 break;
2482
2483 case HCI_OP_ROLE_DISCOVERY:
2484 hci_cc_role_discovery(hdev, skb);
2485 break;
2486
2487 case HCI_OP_READ_LINK_POLICY:
2488 hci_cc_read_link_policy(hdev, skb);
2489 break;
2490
2491 case HCI_OP_WRITE_LINK_POLICY:
2492 hci_cc_write_link_policy(hdev, skb);
2493 break;
2494
2495 case HCI_OP_READ_DEF_LINK_POLICY:
2496 hci_cc_read_def_link_policy(hdev, skb);
2497 break;
2498
2499 case HCI_OP_WRITE_DEF_LINK_POLICY:
2500 hci_cc_write_def_link_policy(hdev, skb);
2501 break;
2502
2503 case HCI_OP_RESET:
2504 hci_cc_reset(hdev, skb);
2505 break;
2506
2507 case HCI_OP_WRITE_LOCAL_NAME:
2508 hci_cc_write_local_name(hdev, skb);
2509 break;
2510
2511 case HCI_OP_READ_LOCAL_NAME:
2512 hci_cc_read_local_name(hdev, skb);
2513 break;
2514
2515 case HCI_OP_WRITE_AUTH_ENABLE:
2516 hci_cc_write_auth_enable(hdev, skb);
2517 break;
2518
2519 case HCI_OP_WRITE_ENCRYPT_MODE:
2520 hci_cc_write_encrypt_mode(hdev, skb);
2521 break;
2522
2523 case HCI_OP_WRITE_SCAN_ENABLE:
2524 hci_cc_write_scan_enable(hdev, skb);
2525 break;
2526
2527 case HCI_OP_READ_CLASS_OF_DEV:
2528 hci_cc_read_class_of_dev(hdev, skb);
2529 break;
2530
2531 case HCI_OP_WRITE_CLASS_OF_DEV:
2532 hci_cc_write_class_of_dev(hdev, skb);
2533 break;
2534
2535 case HCI_OP_READ_VOICE_SETTING:
2536 hci_cc_read_voice_setting(hdev, skb);
2537 break;
2538
2539 case HCI_OP_WRITE_VOICE_SETTING:
2540 hci_cc_write_voice_setting(hdev, skb);
2541 break;
2542
2543 case HCI_OP_HOST_BUFFER_SIZE:
2544 hci_cc_host_buffer_size(hdev, skb);
2545 break;
2546
2547 case HCI_OP_WRITE_SSP_MODE:
2548 hci_cc_write_ssp_mode(hdev, skb);
2549 break;
2550
2551 case HCI_OP_READ_LOCAL_VERSION:
2552 hci_cc_read_local_version(hdev, skb);
2553 break;
2554
2555 case HCI_OP_READ_LOCAL_COMMANDS:
2556 hci_cc_read_local_commands(hdev, skb);
2557 break;
2558
2559 case HCI_OP_READ_LOCAL_FEATURES:
2560 hci_cc_read_local_features(hdev, skb);
2561 break;
2562
2563 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2564 hci_cc_read_local_ext_features(hdev, skb);
2565 break;
2566
2567 case HCI_OP_READ_BUFFER_SIZE:
2568 hci_cc_read_buffer_size(hdev, skb);
2569 break;
2570
2571 case HCI_OP_READ_BD_ADDR:
2572 hci_cc_read_bd_addr(hdev, skb);
2573 break;
2574
2575 case HCI_OP_READ_DATA_BLOCK_SIZE:
2576 hci_cc_read_data_block_size(hdev, skb);
2577 break;
2578
2579 case HCI_OP_WRITE_CA_TIMEOUT:
2580 hci_cc_write_ca_timeout(hdev, skb);
2581 break;
2582
2583 case HCI_OP_READ_FLOW_CONTROL_MODE:
2584 hci_cc_read_flow_control_mode(hdev, skb);
2585 break;
2586
2587 case HCI_OP_READ_LOCAL_AMP_INFO:
2588 hci_cc_read_local_amp_info(hdev, skb);
2589 break;
2590
2591 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2592 hci_cc_read_local_amp_assoc(hdev, skb);
2593 break;
2594
2595 case HCI_OP_DELETE_STORED_LINK_KEY:
2596 hci_cc_delete_stored_link_key(hdev, skb);
2597 break;
2598
2599 case HCI_OP_SET_EVENT_MASK:
2600 hci_cc_set_event_mask(hdev, skb);
2601 break;
2602
2603 case HCI_OP_WRITE_INQUIRY_MODE:
2604 hci_cc_write_inquiry_mode(hdev, skb);
2605 break;
2606
2607 case HCI_OP_READ_INQ_RSP_TX_POWER:
2608 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2609 break;
2610
2611 case HCI_OP_SET_EVENT_FLT:
2612 hci_cc_set_event_flt(hdev, skb);
2613 break;
2614
2615 case HCI_OP_PIN_CODE_REPLY:
2616 hci_cc_pin_code_reply(hdev, skb);
2617 break;
2618
2619 case HCI_OP_PIN_CODE_NEG_REPLY:
2620 hci_cc_pin_code_neg_reply(hdev, skb);
2621 break;
2622
2623 case HCI_OP_READ_LOCAL_OOB_DATA:
2624 hci_cc_read_local_oob_data_reply(hdev, skb);
2625 break;
2626
2627 case HCI_OP_LE_READ_BUFFER_SIZE:
2628 hci_cc_le_read_buffer_size(hdev, skb);
2629 break;
2630
2631 case HCI_OP_LE_READ_ADV_TX_POWER:
2632 hci_cc_le_read_adv_tx_power(hdev, skb);
2633 break;
2634
2635 case HCI_OP_LE_SET_EVENT_MASK:
2636 hci_cc_le_set_event_mask(hdev, skb);
2637 break;
2638
2639 case HCI_OP_USER_CONFIRM_REPLY:
2640 hci_cc_user_confirm_reply(hdev, skb);
2641 break;
2642
2643 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2644 hci_cc_user_confirm_neg_reply(hdev, skb);
2645 break;
2646
2647 case HCI_OP_USER_PASSKEY_REPLY:
2648 hci_cc_user_passkey_reply(hdev, skb);
2649 break;
2650
2651 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2652 hci_cc_user_passkey_neg_reply(hdev, skb);
2653 break;
2654
2655 case HCI_OP_LE_SET_SCAN_PARAM:
2656 hci_cc_le_set_scan_param(hdev, skb);
2657 break;
2658
2659 case HCI_OP_LE_SET_ADV_ENABLE:
2660 hci_cc_le_set_adv_enable(hdev, skb);
2661 break;
2662
2663 case HCI_OP_LE_SET_SCAN_ENABLE:
2664 hci_cc_le_set_scan_enable(hdev, skb);
2665 break;
2666
2667 case HCI_OP_LE_LTK_REPLY:
2668 hci_cc_le_ltk_reply(hdev, skb);
2669 break;
2670
2671 case HCI_OP_LE_LTK_NEG_REPLY:
2672 hci_cc_le_ltk_neg_reply(hdev, skb);
2673 break;
2674
2675 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2676 hci_cc_write_le_host_supported(hdev, skb);
2677 break;
2678
2679 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2680 hci_cc_write_remote_amp_assoc(hdev, skb);
2681 break;
2682
2683 default:
2684 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2685 break;
2686 }
2687
2688 if (ev->opcode != HCI_OP_NOP)
2689 del_timer(&hdev->cmd_timer);
2690
2691 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2692 atomic_set(&hdev->cmd_cnt, 1);
2693 if (!skb_queue_empty(&hdev->cmd_q))
2694 queue_work(hdev->workqueue, &hdev->cmd_work);
2695 }
2696 }
2697
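/* Command Status event: same bookkeeping as Command Complete, but the
 * handlers only receive the status byte.
 */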
2698 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2699 {
2700 struct hci_ev_cmd_status *ev = (void *) skb->data;
2701 __u16 opcode;
2702
2703 skb_pull(skb, sizeof(*ev));
2704
2705 opcode = __le16_to_cpu(ev->opcode);
2706
2707 switch (opcode) {
2708 case HCI_OP_INQUIRY:
2709 hci_cs_inquiry(hdev, ev->status);
2710 break;
2711
2712 case HCI_OP_CREATE_CONN:
2713 hci_cs_create_conn(hdev, ev->status);
2714 break;
2715
2716 case HCI_OP_ADD_SCO:
2717 hci_cs_add_sco(hdev, ev->status);
2718 break;
2719
2720 case HCI_OP_AUTH_REQUESTED:
2721 hci_cs_auth_requested(hdev, ev->status);
2722 break;
2723
2724 case HCI_OP_SET_CONN_ENCRYPT:
2725 hci_cs_set_conn_encrypt(hdev, ev->status);
2726 break;
2727
2728 case HCI_OP_REMOTE_NAME_REQ:
2729 hci_cs_remote_name_req(hdev, ev->status);
2730 break;
2731
2732 case HCI_OP_READ_REMOTE_FEATURES:
2733 hci_cs_read_remote_features(hdev, ev->status);
2734 break;
2735
2736 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2737 hci_cs_read_remote_ext_features(hdev, ev->status);
2738 break;
2739
2740 case HCI_OP_SETUP_SYNC_CONN:
2741 hci_cs_setup_sync_conn(hdev, ev->status);
2742 break;
2743
2744 case HCI_OP_SNIFF_MODE:
2745 hci_cs_sniff_mode(hdev, ev->status);
2746 break;
2747
2748 case HCI_OP_EXIT_SNIFF_MODE:
2749 hci_cs_exit_sniff_mode(hdev, ev->status);
2750 break;
2751
2752 case HCI_OP_DISCONNECT:
2753 hci_cs_disconnect(hdev, ev->status);
2754 break;
2755
2756 case HCI_OP_LE_CREATE_CONN:
2757 hci_cs_le_create_conn(hdev, ev->status);
2758 break;
2759
2760 case HCI_OP_LE_START_ENC:
2761 hci_cs_le_start_enc(hdev, ev->status);
2762 break;
2763
2764 case HCI_OP_CREATE_PHY_LINK:
2765 hci_cs_create_phylink(hdev, ev->status);
2766 break;
2767
2768 case HCI_OP_ACCEPT_PHY_LINK:
2769 hci_cs_accept_phylink(hdev, ev->status);
2770 break;
2771
2772 case HCI_OP_CREATE_LOGICAL_LINK:
2773 hci_cs_create_logical_link(hdev, ev->status);
2774 break;
2775
2776 default:
2777 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2778 break;
2779 }
2780
2781 if (ev->opcode != HCI_OP_NOP)
2782 del_timer(&hdev->cmd_timer);
2783
2784 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2785 atomic_set(&hdev->cmd_cnt, 1);
2786 if (!skb_queue_empty(&hdev->cmd_q))
2787 queue_work(hdev->workqueue, &hdev->cmd_work);
2788 }
2789 }
2790
2791 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2792 {
2793 struct hci_ev_role_change *ev = (void *) skb->data;
2794 struct hci_conn *conn;
2795
2796 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2797
2798 hci_dev_lock(hdev);
2799
2800 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2801 if (conn) {
2802 if (!ev->status) {
2803 if (ev->role)
2804 conn->link_mode &= ~HCI_LM_MASTER;
2805 else
2806 conn->link_mode |= HCI_LM_MASTER;
2807 }
2808
2809 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2810
2811 hci_role_switch_cfm(conn, ev->status, ev->role);
2812 }
2813
2814 hci_dev_unlock(hdev);
2815 }
2816
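/* Number of Completed Packets event: credit the per-link-type packet
 * counters so queued ACL/SCO/LE traffic can be scheduled again.
 */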
2817 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2818 {
2819 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2820 int i;
2821
2822 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2823 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2824 return;
2825 }
2826
2827 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2828 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2829 BT_DBG("%s bad parameters", hdev->name);
2830 return;
2831 }
2832
2833 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2834
2835 for (i = 0; i < ev->num_hndl; i++) {
2836 struct hci_comp_pkts_info *info = &ev->handles[i];
2837 struct hci_conn *conn;
2838 __u16 handle, count;
2839
2840 handle = __le16_to_cpu(info->handle);
2841 count = __le16_to_cpu(info->count);
2842
2843 conn = hci_conn_hash_lookup_handle(hdev, handle);
2844 if (!conn)
2845 continue;
2846
2847 conn->sent -= count;
2848
2849 switch (conn->type) {
2850 case ACL_LINK:
2851 hdev->acl_cnt += count;
2852 if (hdev->acl_cnt > hdev->acl_pkts)
2853 hdev->acl_cnt = hdev->acl_pkts;
2854 break;
2855
2856 case LE_LINK:
2857 if (hdev->le_pkts) {
2858 hdev->le_cnt += count;
2859 if (hdev->le_cnt > hdev->le_pkts)
2860 hdev->le_cnt = hdev->le_pkts;
2861 } else {
2862 hdev->acl_cnt += count;
2863 if (hdev->acl_cnt > hdev->acl_pkts)
2864 hdev->acl_cnt = hdev->acl_pkts;
2865 }
2866 break;
2867
2868 case SCO_LINK:
2869 hdev->sco_cnt += count;
2870 if (hdev->sco_cnt > hdev->sco_pkts)
2871 hdev->sco_cnt = hdev->sco_pkts;
2872 break;
2873
2874 default:
2875 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2876 break;
2877 }
2878 }
2879
2880 queue_work(hdev->workqueue, &hdev->tx_work);
2881 }
2882
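/* Resolve a handle to a connection: on BR/EDR the handle identifies the
 * connection directly, on AMP controllers it identifies an hci_chan.
 */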
2883 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2884 __u16 handle)
2885 {
2886 struct hci_chan *chan;
2887
2888 switch (hdev->dev_type) {
2889 case HCI_BREDR:
2890 return hci_conn_hash_lookup_handle(hdev, handle);
2891 case HCI_AMP:
2892 chan = hci_chan_lookup_handle(hdev, handle);
2893 if (chan)
2894 return chan->conn;
2895 break;
2896 default:
2897 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2898 break;
2899 }
2900
2901 return NULL;
2902 }
2903
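/* Number of Completed Data Blocks event: block-based flow control
 * counterpart of the packet-based handler above.
 */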
2904 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2905 {
2906 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2907 int i;
2908
2909 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2910 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2911 return;
2912 }
2913
2914 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2915 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2916 BT_DBG("%s bad parameters", hdev->name);
2917 return;
2918 }
2919
2920 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2921 ev->num_hndl);
2922
2923 for (i = 0; i < ev->num_hndl; i++) {
2924 struct hci_comp_blocks_info *info = &ev->handles[i];
2925 struct hci_conn *conn = NULL;
2926 __u16 handle, block_count;
2927
2928 handle = __le16_to_cpu(info->handle);
2929 block_count = __le16_to_cpu(info->blocks);
2930
2931 conn = __hci_conn_lookup_handle(hdev, handle);
2932 if (!conn)
2933 continue;
2934
2935 conn->sent -= block_count;
2936
2937 switch (conn->type) {
2938 case ACL_LINK:
2939 case AMP_LINK:
2940 hdev->block_cnt += block_count;
2941 if (hdev->block_cnt > hdev->num_blocks)
2942 hdev->block_cnt = hdev->num_blocks;
2943 break;
2944
2945 default:
2946 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2947 break;
2948 }
2949 }
2950
2951 queue_work(hdev->workqueue, &hdev->tx_work);
2952 }
2953
2954 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2955 {
2956 struct hci_ev_mode_change *ev = (void *) skb->data;
2957 struct hci_conn *conn;
2958
2959 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2960
2961 hci_dev_lock(hdev);
2962
2963 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2964 if (conn) {
2965 conn->mode = ev->mode;
2966 conn->interval = __le16_to_cpu(ev->interval);
2967
2968 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2969 &conn->flags)) {
2970 if (conn->mode == HCI_CM_ACTIVE)
2971 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2972 else
2973 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2974 }
2975
2976 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2977 hci_sco_setup(conn, ev->status);
2978 }
2979
2980 hci_dev_unlock(hdev);
2981 }
2982
2983 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2984 {
2985 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2986 struct hci_conn *conn;
2987
2988 BT_DBG("%s", hdev->name);
2989
2990 hci_dev_lock(hdev);
2991
2992 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2993 if (!conn)
2994 goto unlock;
2995
2996 if (conn->state == BT_CONNECTED) {
2997 hci_conn_hold(conn);
2998 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2999 hci_conn_put(conn);
3000 }
3001
3002 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3003 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3004 sizeof(ev->bdaddr), &ev->bdaddr);
3005 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3006 u8 secure;
3007
3008 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3009 secure = 1;
3010 else
3011 secure = 0;
3012
3013 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3014 }
3015
3016 unlock:
3017 hci_dev_unlock(hdev);
3018 }
3019
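/* Link Key Request event: look up a stored key for the peer and reply
 * with it, rejecting debug or insufficiently secure keys.
 */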
3020 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3021 {
3022 struct hci_ev_link_key_req *ev = (void *) skb->data;
3023 struct hci_cp_link_key_reply cp;
3024 struct hci_conn *conn;
3025 struct link_key *key;
3026
3027 BT_DBG("%s", hdev->name);
3028
3029 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
3030 return;
3031
3032 hci_dev_lock(hdev);
3033
3034 key = hci_find_link_key(hdev, &ev->bdaddr);
3035 if (!key) {
3036 BT_DBG("%s link key not found for %pMR", hdev->name,
3037 &ev->bdaddr);
3038 goto not_found;
3039 }
3040
3041 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3042 &ev->bdaddr);
3043
3044 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
3045 key->type == HCI_LK_DEBUG_COMBINATION) {
3046 BT_DBG("%s ignoring debug key", hdev->name);
3047 goto not_found;
3048 }
3049
3050 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3051 if (conn) {
3052 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
3053 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3054 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3055 goto not_found;
3056 }
3057
3058 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3059 conn->pending_sec_level == BT_SECURITY_HIGH) {
3060 BT_DBG("%s ignoring key unauthenticated for high security",
3061 hdev->name);
3062 goto not_found;
3063 }
3064
3065 conn->key_type = key->type;
3066 conn->pin_length = key->pin_len;
3067 }
3068
3069 bacpy(&cp.bdaddr, &ev->bdaddr);
3070 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3071
3072 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3073
3074 hci_dev_unlock(hdev);
3075
3076 return;
3077
3078 not_found:
3079 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3080 hci_dev_unlock(hdev);
3081 }
3082
3083 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3084 {
3085 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3086 struct hci_conn *conn;
3087 u8 pin_len = 0;
3088
3089 BT_DBG("%s", hdev->name);
3090
3091 hci_dev_lock(hdev);
3092
3093 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3094 if (conn) {
3095 hci_conn_hold(conn);
3096 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3097 pin_len = conn->pin_length;
3098
3099 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3100 conn->key_type = ev->key_type;
3101
3102 hci_conn_put(conn);
3103 }
3104
3105 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
3106 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
3107 ev->key_type, pin_len);
3108
3109 hci_dev_unlock(hdev);
3110 }
3111
3112 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3113 {
3114 struct hci_ev_clock_offset *ev = (void *) skb->data;
3115 struct hci_conn *conn;
3116
3117 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3118
3119 hci_dev_lock(hdev);
3120
3121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3122 if (conn && !ev->status) {
3123 struct inquiry_entry *ie;
3124
3125 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3126 if (ie) {
3127 ie->data.clock_offset = ev->clock_offset;
3128 ie->timestamp = jiffies;
3129 }
3130 }
3131
3132 hci_dev_unlock(hdev);
3133 }
3134
3135 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3136 {
3137 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3138 struct hci_conn *conn;
3139
3140 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3141
3142 hci_dev_lock(hdev);
3143
3144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3145 if (conn && !ev->status)
3146 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3147
3148 hci_dev_unlock(hdev);
3149 }
3150
3151 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3152 {
3153 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3154 struct inquiry_entry *ie;
3155
3156 BT_DBG("%s", hdev->name);
3157
3158 hci_dev_lock(hdev);
3159
3160 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3161 if (ie) {
3162 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3163 ie->timestamp = jiffies;
3164 }
3165
3166 hci_dev_unlock(hdev);
3167 }
3168
3169 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3170 struct sk_buff *skb)
3171 {
3172 struct inquiry_data data;
3173 int num_rsp = *((__u8 *) skb->data);
3174 bool name_known, ssp;
3175
3176 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3177
3178 if (!num_rsp)
3179 return;
3180
3181 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3182 return;
3183
3184 hci_dev_lock(hdev);
3185
3186 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3187 struct inquiry_info_with_rssi_and_pscan_mode *info;
3188 info = (void *) (skb->data + 1);
3189
3190 for (; num_rsp; num_rsp--, info++) {
3191 bacpy(&data.bdaddr, &info->bdaddr);
3192 data.pscan_rep_mode = info->pscan_rep_mode;
3193 data.pscan_period_mode = info->pscan_period_mode;
3194 data.pscan_mode = info->pscan_mode;
3195 memcpy(data.dev_class, info->dev_class, 3);
3196 data.clock_offset = info->clock_offset;
3197 data.rssi = info->rssi;
3198 data.ssp_mode = 0x00;
3199
3200 name_known = hci_inquiry_cache_update(hdev, &data,
3201 false, &ssp);
3202 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3203 info->dev_class, info->rssi,
3204 !name_known, ssp, NULL, 0);
3205 }
3206 } else {
3207 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3208
3209 for (; num_rsp; num_rsp--, info++) {
3210 bacpy(&data.bdaddr, &info->bdaddr);
3211 data.pscan_rep_mode = info->pscan_rep_mode;
3212 data.pscan_period_mode = info->pscan_period_mode;
3213 data.pscan_mode = 0x00;
3214 memcpy(data.dev_class, info->dev_class, 3);
3215 data.clock_offset = info->clock_offset;
3216 data.rssi = info->rssi;
3217 data.ssp_mode = 0x00;
3218 name_known = hci_inquiry_cache_update(hdev, &data,
3219 false, &ssp);
3220 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3221 info->dev_class, info->rssi,
3222 !name_known, ssp, NULL, 0);
3223 }
3224 }
3225
3226 hci_dev_unlock(hdev);
3227 }
3228
3229 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3230 struct sk_buff *skb)
3231 {
3232 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3233 struct hci_conn *conn;
3234
3235 BT_DBG("%s", hdev->name);
3236
3237 hci_dev_lock(hdev);
3238
3239 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3240 if (!conn)
3241 goto unlock;
3242
3243 if (!ev->status && ev->page == 0x01) {
3244 struct inquiry_entry *ie;
3245
3246 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3247 if (ie)
3248 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3249
3250 if (ev->features[0] & LMP_HOST_SSP)
3251 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3252 }
3253
3254 if (conn->state != BT_CONFIG)
3255 goto unlock;
3256
3257 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3258 struct hci_cp_remote_name_req cp;
3259 memset(&cp, 0, sizeof(cp));
3260 bacpy(&cp.bdaddr, &conn->dst);
3261 cp.pscan_rep_mode = 0x02;
3262 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3263 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3264 mgmt_device_connected(hdev, &conn->dst, conn->type,
3265 conn->dst_type, 0, NULL, 0,
3266 conn->dev_class);
3267
3268 if (!hci_outgoing_auth_needed(hdev, conn)) {
3269 conn->state = BT_CONNECTED;
3270 hci_proto_connect_cfm(conn, ev->status);
3271 hci_conn_put(conn);
3272 }
3273
3274 unlock:
3275 hci_dev_unlock(hdev);
3276 }
3277
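/* Synchronous Connection Complete event: finalize SCO/eSCO setup or,
 * for a handful of failure codes, retry the setup with a fallback
 * packet type.
 */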
3278 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3279 struct sk_buff *skb)
3280 {
3281 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3282 struct hci_conn *conn;
3283
3284 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3285
3286 hci_dev_lock(hdev);
3287
3288 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3289 if (!conn) {
3290 if (ev->link_type == ESCO_LINK)
3291 goto unlock;
3292
3293 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3294 if (!conn)
3295 goto unlock;
3296
3297 conn->type = SCO_LINK;
3298 }
3299
3300 switch (ev->status) {
3301 case 0x00:
3302 conn->handle = __le16_to_cpu(ev->handle);
3303 conn->state = BT_CONNECTED;
3304
3305 hci_conn_hold_device(conn);
3306 hci_conn_add_sysfs(conn);
3307 break;
3308
3309 case 0x11: /* Unsupported Feature or Parameter Value */
3310 case 0x1c: /* SCO interval rejected */
3311 case 0x1a: /* Unsupported Remote Feature */
3312 case 0x1f: /* Unspecified error */
3313 if (conn->out && conn->attempt < 2) {
3314 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3315 (hdev->esco_type & EDR_ESCO_MASK);
3316 hci_setup_sync(conn, conn->link->handle);
3317 goto unlock;
3318 }
3319 /* fall through */
3320
3321 default:
3322 conn->state = BT_CLOSED;
3323 break;
3324 }
3325
3326 hci_proto_connect_cfm(conn, ev->status);
3327 if (ev->status)
3328 hci_conn_del(conn);
3329
3330 unlock:
3331 hci_dev_unlock(hdev);
3332 }
3333
3334 static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
3335 {
3336 BT_DBG("%s", hdev->name);
3337 }
3338
3339 static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
3340 {
3341 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
3342
3343 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3344 }
3345
3346 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3347 struct sk_buff *skb)
3348 {
3349 struct inquiry_data data;
3350 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3351 int num_rsp = *((__u8 *) skb->data);
3352 size_t eir_len;
3353
3354 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3355
3356 if (!num_rsp)
3357 return;
3358
3359 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3360 return;
3361
3362 hci_dev_lock(hdev);
3363
3364 for (; num_rsp; num_rsp--, info++) {
3365 bool name_known, ssp;
3366
3367 bacpy(&data.bdaddr, &info->bdaddr);
3368 data.pscan_rep_mode = info->pscan_rep_mode;
3369 data.pscan_period_mode = info->pscan_period_mode;
3370 data.pscan_mode = 0x00;
3371 memcpy(data.dev_class, info->dev_class, 3);
3372 data.clock_offset = info->clock_offset;
3373 data.rssi = info->rssi;
3374 data.ssp_mode = 0x01;
3375
3376 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3377 name_known = eir_has_data_type(info->data,
3378 sizeof(info->data),
3379 EIR_NAME_COMPLETE);
3380 else
3381 name_known = true;
3382
3383 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3384 &ssp);
3385 eir_len = eir_get_length(info->data, sizeof(info->data));
3386 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3387 info->dev_class, info->rssi, !name_known,
3388 ssp, info->data, eir_len);
3389 }
3390
3391 hci_dev_unlock(hdev);
3392 }
3393
3394 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3395 struct sk_buff *skb)
3396 {
3397 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3398 struct hci_conn *conn;
3399
3400 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3401 __le16_to_cpu(ev->handle));
3402
3403 hci_dev_lock(hdev);
3404
3405 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3406 if (!conn)
3407 goto unlock;
3408
3409 if (!ev->status)
3410 conn->sec_level = conn->pending_sec_level;
3411
3412 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3413
3414 if (ev->status && conn->state == BT_CONNECTED) {
3415 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
3416 hci_conn_put(conn);
3417 goto unlock;
3418 }
3419
3420 if (conn->state == BT_CONFIG) {
3421 if (!ev->status)
3422 conn->state = BT_CONNECTED;
3423
3424 hci_proto_connect_cfm(conn, ev->status);
3425 hci_conn_put(conn);
3426 } else {
3427 hci_auth_cfm(conn, ev->status);
3428
3429 hci_conn_hold(conn);
3430 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3431 hci_conn_put(conn);
3432 }
3433
3434 unlock:
3435 hci_dev_unlock(hdev);
3436 }
3437
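/* Derive the authentication requirements to advertise in the IO
 * Capability Reply, taking the remote side's bonding request into
 * account.
 */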
3438 static u8 hci_get_auth_req(struct hci_conn *conn)
3439 {
3440         /* If the remote requests dedicated bonding, follow that lead */
3441 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3442                 /* If both remote and local IO capabilities allow MITM
3443                  * protection, require it; otherwise don't. */
3444 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3445 return 0x02;
3446 else
3447 return 0x03;
3448 }
3449
3450         /* If the remote requests no-bonding, follow that lead */
3451 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3452 return conn->remote_auth | (conn->auth_type & 0x01);
3453
3454 return conn->auth_type;
3455 }
3456
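/* IO Capability Request event: reply with our IO capability and
 * authentication requirements, or send a negative reply when pairing is
 * not allowed.
 */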
3457 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3458 {
3459 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3460 struct hci_conn *conn;
3461
3462 BT_DBG("%s", hdev->name);
3463
3464 hci_dev_lock(hdev);
3465
3466 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3467 if (!conn)
3468 goto unlock;
3469
3470 hci_conn_hold(conn);
3471
3472 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3473 goto unlock;
3474
3475 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3476 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3477 struct hci_cp_io_capability_reply cp;
3478
3479 bacpy(&cp.bdaddr, &ev->bdaddr);
3480                 /* Change the IO capability from KeyboardDisplay to
3481                  * DisplayYesNo, as KeyboardDisplay is not supported by the BT spec. */
3482 cp.capability = (conn->io_capability == 0x04) ?
3483 0x01 : conn->io_capability;
3484 conn->auth_type = hci_get_auth_req(conn);
3485 cp.authentication = conn->auth_type;
3486
3487 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3488 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3489 cp.oob_data = 0x01;
3490 else
3491 cp.oob_data = 0x00;
3492
3493 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3494 sizeof(cp), &cp);
3495 } else {
3496 struct hci_cp_io_capability_neg_reply cp;
3497
3498 bacpy(&cp.bdaddr, &ev->bdaddr);
3499 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3500
3501 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3502 sizeof(cp), &cp);
3503 }
3504
3505 unlock:
3506 hci_dev_unlock(hdev);
3507 }
3508
3509 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3510 {
3511 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3512 struct hci_conn *conn;
3513
3514 BT_DBG("%s", hdev->name);
3515
3516 hci_dev_lock(hdev);
3517
3518 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3519 if (!conn)
3520 goto unlock;
3521
3522 conn->remote_cap = ev->capability;
3523 conn->remote_auth = ev->authentication;
3524 if (ev->oob_data)
3525 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3526
3527 unlock:
3528 hci_dev_unlock(hdev);
3529 }
3530
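/* User Confirmation Request event: reject if MITM is required but the
 * remote can't provide it, auto-accept when neither side needs MITM,
 * and hand everything else to user space via mgmt.
 */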
3531 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3532 struct sk_buff *skb)
3533 {
3534 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3535 int loc_mitm, rem_mitm, confirm_hint = 0;
3536 struct hci_conn *conn;
3537
3538 BT_DBG("%s", hdev->name);
3539
3540 hci_dev_lock(hdev);
3541
3542 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3543 goto unlock;
3544
3545 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3546 if (!conn)
3547 goto unlock;
3548
3549 loc_mitm = (conn->auth_type & 0x01);
3550 rem_mitm = (conn->remote_auth & 0x01);
3551
3552         /* If we require MITM but the remote device can't provide it
3553          * (it has NoInputNoOutput), reject the confirmation request.
3554          * The only exception is when we're the dedicated bonding
3555          * initiator (connect_cfm_cb set), since then we always have
3556          * the MITM bit set. */
3557 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3558 BT_DBG("Rejecting request: remote device can't provide MITM");
3559 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3560 sizeof(ev->bdaddr), &ev->bdaddr);
3561 goto unlock;
3562 }
3563
3564         /* If neither side requires MITM protection, auto-accept */
3565 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3566 (!rem_mitm || conn->io_capability == 0x03)) {
3567
3568                 /* If we're not the initiator, request authorization to
3569                  * proceed from user space (mgmt_user_confirm with
3570                  * confirm_hint set to 1). */
3571 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3572 BT_DBG("Confirming auto-accept as acceptor");
3573 confirm_hint = 1;
3574 goto confirm;
3575 }
3576
3577 BT_DBG("Auto-accept of user confirmation with %ums delay",
3578 hdev->auto_accept_delay);
3579
3580 if (hdev->auto_accept_delay > 0) {
3581 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3582 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3583 goto unlock;
3584 }
3585
3586 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3587 sizeof(ev->bdaddr), &ev->bdaddr);
3588 goto unlock;
3589 }
3590
3591 confirm:
3592 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3593 confirm_hint);
3594
3595 unlock:
3596 hci_dev_unlock(hdev);
3597 }
3598
3599 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3600 struct sk_buff *skb)
3601 {
3602 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3603
3604 BT_DBG("%s", hdev->name);
3605
3606 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3607 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3608 }
3609
3610 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3611 struct sk_buff *skb)
3612 {
3613 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3614 struct hci_conn *conn;
3615
3616 BT_DBG("%s", hdev->name);
3617
3618 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3619 if (!conn)
3620 return;
3621
3622 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3623 conn->passkey_entered = 0;
3624
3625 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3626 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3627 conn->dst_type, conn->passkey_notify,
3628 conn->passkey_entered);
3629 }
3630
3631 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3632 {
3633 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3634 struct hci_conn *conn;
3635
3636 BT_DBG("%s", hdev->name);
3637
3638 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3639 if (!conn)
3640 return;
3641
3642 switch (ev->type) {
3643 case HCI_KEYPRESS_STARTED:
3644 conn->passkey_entered = 0;
3645 return;
3646
3647 case HCI_KEYPRESS_ENTERED:
3648 conn->passkey_entered++;
3649 break;
3650
3651 case HCI_KEYPRESS_ERASED:
3652 conn->passkey_entered--;
3653 break;
3654
3655 case HCI_KEYPRESS_CLEARED:
3656 conn->passkey_entered = 0;
3657 break;
3658
3659 case HCI_KEYPRESS_COMPLETED:
3660 return;
3661 }
3662
3663 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3664 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3665 conn->dst_type, conn->passkey_notify,
3666 conn->passkey_entered);
3667 }
3668
3669 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3670 struct sk_buff *skb)
3671 {
3672 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3673 struct hci_conn *conn;
3674
3675 BT_DBG("%s", hdev->name);
3676
3677 hci_dev_lock(hdev);
3678
3679 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3680 if (!conn)
3681 goto unlock;
3682
3683         /* To avoid sending duplicate auth_failed events to user space,
3684          * we check the HCI_CONN_AUTH_PEND flag, which is set if we
3685          * initiated the authentication. A traditional auth_complete
3686          * event is always produced when we are the initiator and is
3687          * also mapped to the mgmt_auth_failed event. */
3688 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3689 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3690 ev->status);
3691
3692 hci_conn_put(conn);
3693
3694 unlock:
3695 hci_dev_unlock(hdev);
3696 }
3697
3698 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3699 struct sk_buff *skb)
3700 {
3701 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3702 struct inquiry_entry *ie;
3703
3704 BT_DBG("%s", hdev->name);
3705
3706 hci_dev_lock(hdev);
3707
3708 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3709 if (ie)
3710 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3711
3712 hci_dev_unlock(hdev);
3713 }
3714
3715 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3716 struct sk_buff *skb)
3717 {
3718 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3719 struct oob_data *data;
3720
3721 BT_DBG("%s", hdev->name);
3722
3723 hci_dev_lock(hdev);
3724
3725 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3726 goto unlock;
3727
3728 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3729 if (data) {
3730 struct hci_cp_remote_oob_data_reply cp;
3731
3732 bacpy(&cp.bdaddr, &ev->bdaddr);
3733 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3734 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3735
3736 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3737 &cp);
3738 } else {
3739 struct hci_cp_remote_oob_data_neg_reply cp;
3740
3741 bacpy(&cp.bdaddr, &ev->bdaddr);
3742 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3743 &cp);
3744 }
3745
3746 unlock:
3747 hci_dev_unlock(hdev);
3748 }
3749
3750 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3751 struct sk_buff *skb)
3752 {
3753 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3754 struct hci_conn *hcon, *bredr_hcon;
3755
3756 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3757 ev->status);
3758
3759 hci_dev_lock(hdev);
3760
3761 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3762 if (!hcon) {
3763 hci_dev_unlock(hdev);
3764 return;
3765 }
3766
3767 if (ev->status) {
3768 hci_conn_del(hcon);
3769 hci_dev_unlock(hdev);
3770 return;
3771 }
3772
3773 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3774
3775 hcon->state = BT_CONNECTED;
3776 bacpy(&hcon->dst, &bredr_hcon->dst);
3777
3778 hci_conn_hold(hcon);
3779 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3780 hci_conn_put(hcon);
3781
3782 hci_conn_hold_device(hcon);
3783 hci_conn_add_sysfs(hcon);
3784
3785 amp_physical_cfm(bredr_hcon, hcon);
3786
3787 hci_dev_unlock(hdev);
3788 }
3789
3790 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3791 {
3792 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3793 struct hci_conn *hcon;
3794 struct hci_chan *hchan;
3795 struct amp_mgr *mgr;
3796
3797 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3798 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3799 ev->status);
3800
3801 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3802 if (!hcon)
3803 return;
3804
3805 /* Create AMP hchan */
3806 hchan = hci_chan_create(hcon);
3807 if (!hchan)
3808 return;
3809
3810 hchan->handle = le16_to_cpu(ev->handle);
3811
3812 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3813
3814 mgr = hcon->amp_mgr;
3815 if (mgr && mgr->bredr_chan) {
3816 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3817
3818 l2cap_chan_lock(bredr_chan);
3819
3820 bredr_chan->conn->mtu = hdev->block_mtu;
3821 l2cap_logical_cfm(bredr_chan, hchan, 0);
3822 hci_conn_hold(hcon);
3823
3824 l2cap_chan_unlock(bredr_chan);
3825 }
3826 }
3827
3828 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3829 struct sk_buff *skb)
3830 {
3831 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3832 struct hci_chan *hchan;
3833
3834 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3835 le16_to_cpu(ev->handle), ev->status);
3836
3837 if (ev->status)
3838 return;
3839
3840 hci_dev_lock(hdev);
3841
3842 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3843 if (!hchan)
3844 goto unlock;
3845
3846 amp_destroy_logical_link(hchan, ev->reason);
3847
3848 unlock:
3849 hci_dev_unlock(hdev);
3850 }
3851
3852 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3853 struct sk_buff *skb)
3854 {
3855 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3856 struct hci_conn *hcon;
3857
3858 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3859
3860 if (ev->status)
3861 return;
3862
3863 hci_dev_lock(hdev);
3864
3865 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3866 if (hcon) {
3867 hcon->state = BT_CLOSED;
3868 hci_conn_del(hcon);
3869 }
3870
3871 hci_dev_unlock(hdev);
3872 }
3873
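/* LE Connection Complete event: attach the completion to the pending LE
 * connection, or create a new one for incoming links, and notify mgmt
 * of success or failure.
 */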
3874 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3875 {
3876 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3877 struct hci_conn *conn;
3878
3879 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3880
3881 hci_dev_lock(hdev);
3882
3883 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3884 if (!conn) {
3885 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3886 if (!conn) {
3887 BT_ERR("No memory for new connection");
3888 goto unlock;
3889 }
3890
3891 conn->dst_type = ev->bdaddr_type;
3892
3893 if (ev->role == LE_CONN_ROLE_MASTER) {
3894 conn->out = true;
3895 conn->link_mode |= HCI_LM_MASTER;
3896 }
3897 }
3898
3899 if (ev->status) {
3900 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3901 conn->dst_type, ev->status);
3902 hci_proto_connect_cfm(conn, ev->status);
3903 conn->state = BT_CLOSED;
3904 hci_conn_del(conn);
3905 goto unlock;
3906 }
3907
3908 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3909 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3910 conn->dst_type, 0, NULL, 0, NULL);
3911
3912 conn->sec_level = BT_SECURITY_LOW;
3913 conn->handle = __le16_to_cpu(ev->handle);
3914 conn->state = BT_CONNECTED;
3915
3916 hci_conn_hold_device(conn);
3917 hci_conn_add_sysfs(conn);
3918
3919 hci_proto_connect_cfm(conn, ev->status);
3920
3921 unlock:
3922 hci_dev_unlock(hdev);
3923 }
3924
3925 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3926 {
3927 u8 num_reports = skb->data[0];
3928 void *ptr = &skb->data[1];
3929 s8 rssi;
3930
3931 hci_dev_lock(hdev);
3932
3933 while (num_reports--) {
3934 struct hci_ev_le_advertising_info *ev = ptr;
3935
3936 rssi = ev->data[ev->length];
3937 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3938 NULL, rssi, 0, 1, ev->data, ev->length);
3939
3940 ptr += sizeof(*ev) + ev->length + 1;
3941 }
3942
3943 hci_dev_unlock(hdev);
3944 }
3945
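/* LE Long Term Key Request event: reply with a matching stored LTK or
 * send a negative reply; short term keys are single-use and removed
 * after the reply.
 */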
3946 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3947 {
3948 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3949 struct hci_cp_le_ltk_reply cp;
3950 struct hci_cp_le_ltk_neg_reply neg;
3951 struct hci_conn *conn;
3952 struct smp_ltk *ltk;
3953
3954 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3955
3956 hci_dev_lock(hdev);
3957
3958 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3959 if (conn == NULL)
3960 goto not_found;
3961
3962 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3963 if (ltk == NULL)
3964 goto not_found;
3965
3966 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3967 cp.handle = cpu_to_le16(conn->handle);
3968
3969 if (ltk->authenticated)
3970 conn->sec_level = BT_SECURITY_HIGH;
3971
3972 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3973
3974 if (ltk->type & HCI_SMP_STK) {
3975 list_del(&ltk->list);
3976 kfree(ltk);
3977 }
3978
3979 hci_dev_unlock(hdev);
3980
3981 return;
3982
3983 not_found:
3984 neg.handle = ev->handle;
3985 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3986 hci_dev_unlock(hdev);
3987 }
3988
3989 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3990 {
3991 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3992
3993 skb_pull(skb, sizeof(*le_ev));
3994
3995 switch (le_ev->subevent) {
3996 case HCI_EV_LE_CONN_COMPLETE:
3997 hci_le_conn_complete_evt(hdev, skb);
3998 break;
3999
4000 case HCI_EV_LE_ADVERTISING_REPORT:
4001 hci_le_adv_report_evt(hdev, skb);
4002 break;
4003
4004 case HCI_EV_LE_LTK_REQ:
4005 hci_le_ltk_request_evt(hdev, skb);
4006 break;
4007
4008 default:
4009 break;
4010 }
4011 }
4012
4013 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4014 {
4015 struct hci_ev_channel_selected *ev = (void *) skb->data;
4016 struct hci_conn *hcon;
4017
4018 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4019
4020 skb_pull(skb, sizeof(*ev));
4021
4022 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4023 if (!hcon)
4024 return;
4025
4026 amp_read_loc_assoc_final_data(hdev, hcon);
4027 }
4028
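/* Main HCI event dispatcher: strip the event header and hand the packet
 * to the matching handler above.
 */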
4029 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4030 {
4031 struct hci_event_hdr *hdr = (void *) skb->data;
4032 __u8 event = hdr->evt;
4033
4034 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4035
4036 switch (event) {
4037 case HCI_EV_INQUIRY_COMPLETE:
4038 hci_inquiry_complete_evt(hdev, skb);
4039 break;
4040
4041 case HCI_EV_INQUIRY_RESULT:
4042 hci_inquiry_result_evt(hdev, skb);
4043 break;
4044
4045 case HCI_EV_CONN_COMPLETE:
4046 hci_conn_complete_evt(hdev, skb);
4047 break;
4048
4049 case HCI_EV_CONN_REQUEST:
4050 hci_conn_request_evt(hdev, skb);
4051 break;
4052
4053 case HCI_EV_DISCONN_COMPLETE:
4054 hci_disconn_complete_evt(hdev, skb);
4055 break;
4056
4057 case HCI_EV_AUTH_COMPLETE:
4058 hci_auth_complete_evt(hdev, skb);
4059 break;
4060
4061 case HCI_EV_REMOTE_NAME:
4062 hci_remote_name_evt(hdev, skb);
4063 break;
4064
4065 case HCI_EV_ENCRYPT_CHANGE:
4066 hci_encrypt_change_evt(hdev, skb);
4067 break;
4068
4069 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4070 hci_change_link_key_complete_evt(hdev, skb);
4071 break;
4072
4073 case HCI_EV_REMOTE_FEATURES:
4074 hci_remote_features_evt(hdev, skb);
4075 break;
4076
4077 case HCI_EV_REMOTE_VERSION:
4078 hci_remote_version_evt(hdev, skb);
4079 break;
4080
4081 case HCI_EV_QOS_SETUP_COMPLETE:
4082 hci_qos_setup_complete_evt(hdev, skb);
4083 break;
4084
4085 case HCI_EV_CMD_COMPLETE:
4086 hci_cmd_complete_evt(hdev, skb);
4087 break;
4088
4089 case HCI_EV_CMD_STATUS:
4090 hci_cmd_status_evt(hdev, skb);
4091 break;
4092
4093 case HCI_EV_ROLE_CHANGE:
4094 hci_role_change_evt(hdev, skb);
4095 break;
4096
4097 case HCI_EV_NUM_COMP_PKTS:
4098 hci_num_comp_pkts_evt(hdev, skb);
4099 break;
4100
4101 case HCI_EV_MODE_CHANGE:
4102 hci_mode_change_evt(hdev, skb);
4103 break;
4104
4105 case HCI_EV_PIN_CODE_REQ:
4106 hci_pin_code_request_evt(hdev, skb);
4107 break;
4108
4109 case HCI_EV_LINK_KEY_REQ:
4110 hci_link_key_request_evt(hdev, skb);
4111 break;
4112
4113 case HCI_EV_LINK_KEY_NOTIFY:
4114 hci_link_key_notify_evt(hdev, skb);
4115 break;
4116
4117 case HCI_EV_CLOCK_OFFSET:
4118 hci_clock_offset_evt(hdev, skb);
4119 break;
4120
4121 case HCI_EV_PKT_TYPE_CHANGE:
4122 hci_pkt_type_change_evt(hdev, skb);
4123 break;
4124
4125 case HCI_EV_PSCAN_REP_MODE:
4126 hci_pscan_rep_mode_evt(hdev, skb);
4127 break;
4128
4129 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4130 hci_inquiry_result_with_rssi_evt(hdev, skb);
4131 break;
4132
4133 case HCI_EV_REMOTE_EXT_FEATURES:
4134 hci_remote_ext_features_evt(hdev, skb);
4135 break;
4136
4137 case HCI_EV_SYNC_CONN_COMPLETE:
4138 hci_sync_conn_complete_evt(hdev, skb);
4139 break;
4140
4141 case HCI_EV_SYNC_CONN_CHANGED:
4142 hci_sync_conn_changed_evt(hdev, skb);
4143 break;
4144
4145 case HCI_EV_SNIFF_SUBRATE:
4146 hci_sniff_subrate_evt(hdev, skb);
4147 break;
4148
4149 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4150 hci_extended_inquiry_result_evt(hdev, skb);
4151 break;
4152
4153 case HCI_EV_KEY_REFRESH_COMPLETE:
4154 hci_key_refresh_complete_evt(hdev, skb);
4155 break;
4156
4157 case HCI_EV_IO_CAPA_REQUEST:
4158 hci_io_capa_request_evt(hdev, skb);
4159 break;
4160
4161 case HCI_EV_IO_CAPA_REPLY:
4162 hci_io_capa_reply_evt(hdev, skb);
4163 break;
4164
4165 case HCI_EV_USER_CONFIRM_REQUEST:
4166 hci_user_confirm_request_evt(hdev, skb);
4167 break;
4168
4169 case HCI_EV_USER_PASSKEY_REQUEST:
4170 hci_user_passkey_request_evt(hdev, skb);
4171 break;
4172
4173 case HCI_EV_USER_PASSKEY_NOTIFY:
4174 hci_user_passkey_notify_evt(hdev, skb);
4175 break;
4176
4177 case HCI_EV_KEYPRESS_NOTIFY:
4178 hci_keypress_notify_evt(hdev, skb);
4179 break;
4180
4181 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4182 hci_simple_pair_complete_evt(hdev, skb);
4183 break;
4184
4185 case HCI_EV_REMOTE_HOST_FEATURES:
4186 hci_remote_host_features_evt(hdev, skb);
4187 break;
4188
4189 case HCI_EV_LE_META:
4190 hci_le_meta_evt(hdev, skb);
4191 break;
4192
4193 case HCI_EV_CHANNEL_SELECTED:
4194 hci_chan_selected_evt(hdev, skb);
4195 break;
4196
4197 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4198 hci_remote_oob_data_request_evt(hdev, skb);
4199 break;
4200
4201 case HCI_EV_PHY_LINK_COMPLETE:
4202 hci_phy_link_complete_evt(hdev, skb);
4203 break;
4204
4205 case HCI_EV_LOGICAL_LINK_COMPLETE:
4206 hci_loglink_complete_evt(hdev, skb);
4207 break;
4208
4209 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4210 hci_disconn_loglink_complete_evt(hdev, skb);
4211 break;
4212
4213 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4214 hci_disconn_phylink_complete_evt(hdev, skb);
4215 break;
4216
4217 case HCI_EV_NUM_COMP_BLOCKS:
4218 hci_num_comp_blocks_evt(hdev, skb);
4219 break;
4220
4221 default:
4222 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4223 break;
4224 }
4225
4226 kfree_skb(skb);
4227 hdev->stat.evt_rx++;
4228 }