Bluetooth: Convert hdev->out to a bool type
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 static bool enable_le;
49
50 /* Handle HCI Event packets */
51
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On controller error the failure is propagated to the management
 * interface; on success the inquiry flag is cleared, discovery state
 * is reset and any deferred connection attempts are retried.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	/* Kick off connection requests that were held back while the
	 * inquiry was in progress. */
	hci_conn_check_pending(hdev);
}
75
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%x", hdev->name, status);
81
82 if (status)
83 return;
84
85 hci_conn_check_pending(hdev);
86 }
87
88 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
89 {
90 BT_DBG("%s", hdev->name);
91 }
92
/* Command Complete handler for HCI_Role_Discovery: update the cached
 * master/slave bit of the affected connection. */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		/* role 0x00 = master, 0x01 = slave */
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}
115
/* Command Complete handler for HCI_Read_Link_Policy_Settings: cache the
 * per-connection link policy reported by the controller. */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
134
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * The response only carries the handle, so the policy value is taken
 * from the command we sent (it sits at offset 2, after the 16-bit
 * connection handle, in struct hci_cp_write_link_policy).
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
/* Command Complete handler for HCI_Write_Default_Link_Policy_Settings.
 * The cached default is updated from the sent command on success; the
 * pending request is completed regardless of status. */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
187
/* Command Complete handler for HCI_Reset. */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset all flags, except persistent ones */
	hdev->dev_flags &= BIT(HCI_MGMT) | BIT(HCI_SETUP) | BIT(HCI_AUTO_OFF);
}
201
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * mgmt is always notified (so a failed name change can be reported),
 * but the cached device name is only updated on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);

	if (status == 0)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
223
224 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
225 {
226 struct hci_rp_read_local_name *rp = (void *) skb->data;
227
228 BT_DBG("%s status 0x%x", hdev->name, rp->status);
229
230 if (rp->status)
231 return;
232
233 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
234 }
235
/* Command Complete handler for HCI_Write_Authentication_Enable: mirror
 * the parameter we sent into the HCI_AUTH flag on success. */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
258
/* Command Complete handler for HCI_Write_Encryption_Mode: mirror the
 * parameter we sent into the HCI_ENCRYPT flag on success. */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
281
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * Synchronises the HCI_ISCAN/HCI_PSCAN flags with the scan mode we
 * requested and notifies mgmt about discoverable/connectable
 * transitions.  On failure mgmt is informed and the discoverable
 * timeout is cancelled.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status != 0) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Remember the previous state so mgmt is only told about actual
	 * transitions, then clear both flags before re-deriving them. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Arm the timer that turns discoverable mode back off. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
									to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
330
/* Command Complete handler for HCI_Read_Class_of_Device: cache the
 * 24-bit little-endian Class of Device value. */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
345
346 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
347 {
348 __u8 status = *((__u8 *) skb->data);
349 void *sent;
350
351 BT_DBG("%s status 0x%x", hdev->name, status);
352
353 if (status)
354 return;
355
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 if (!sent)
358 return;
359
360 memcpy(hdev->dev_class, sent, 3);
361 }
362
/* Command Complete handler for HCI_Read_Voice_Setting: cache the value
 * and notify the driver only when it actually changed. */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* Avoid a spurious driver notification when nothing changed. */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
385
/* Command Complete handler for HCI_Write_Voice_Setting: the new value is
 * taken from the command we sent; the driver is notified on change. */
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	/* Avoid a spurious driver notification when nothing changed. */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
413
414 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
415 {
416 __u8 status = *((__u8 *) skb->data);
417
418 BT_DBG("%s status 0x%x", hdev->name, status);
419
420 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
421 }
422
423 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
424 {
425 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
426
427 BT_DBG("%s status 0x%x", hdev->name, rp->status);
428
429 if (rp->status)
430 return;
431
432 hdev->ssp_mode = rp->mode;
433 }
434
435 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
436 {
437 __u8 status = *((__u8 *) skb->data);
438 void *sent;
439
440 BT_DBG("%s status 0x%x", hdev->name, status);
441
442 if (status)
443 return;
444
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
446 if (!sent)
447 return;
448
449 hdev->ssp_mode = *((__u8 *) sent);
450 }
451
452 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
453 {
454 if (hdev->features[6] & LMP_EXT_INQ)
455 return 2;
456
457 if (hdev->features[3] & LMP_RSSI_INQ)
458 return 1;
459
460 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
461 hdev->lmp_subver == 0x0757)
462 return 1;
463
464 if (hdev->manufacturer == 15) {
465 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
466 return 1;
467 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
468 return 1;
469 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
470 return 1;
471 }
472
473 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
474 hdev->lmp_subver == 0x1805)
475 return 1;
476
477 return 0;
478 }
479
480 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
481 {
482 u8 mode;
483
484 mode = hci_get_inquiry_mode(hdev);
485
486 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
487 }
488
489 static void hci_setup_event_mask(struct hci_dev *hdev)
490 {
491 /* The second byte is 0xff instead of 0x9f (two reserved bits
492 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
493 * command otherwise */
494 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
495
496 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
497 * any event mask for pre 1.2 devices */
498 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
499 return;
500
501 events[4] |= 0x01; /* Flow Specification Complete */
502 events[4] |= 0x02; /* Inquiry Result with RSSI */
503 events[4] |= 0x04; /* Read Remote Extended Features Complete */
504 events[5] |= 0x08; /* Synchronous Connection Complete */
505 events[5] |= 0x10; /* Synchronous Connection Changed */
506
507 if (hdev->features[3] & LMP_RSSI_INQ)
508 events[4] |= 0x04; /* Inquiry Result with RSSI */
509
510 if (hdev->features[5] & LMP_SNIFF_SUBR)
511 events[5] |= 0x20; /* Sniff Subrating */
512
513 if (hdev->features[5] & LMP_PAUSE_ENC)
514 events[5] |= 0x80; /* Encryption Key Refresh Complete */
515
516 if (hdev->features[6] & LMP_EXT_INQ)
517 events[5] |= 0x40; /* Extended Inquiry Result */
518
519 if (hdev->features[6] & LMP_NO_FLUSH)
520 events[7] |= 0x01; /* Enhanced Flush Complete */
521
522 if (hdev->features[7] & LMP_LSTO)
523 events[6] |= 0x80; /* Link Supervision Timeout Changed */
524
525 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
526 events[6] |= 0x01; /* IO Capability Request */
527 events[6] |= 0x02; /* IO Capability Response */
528 events[6] |= 0x04; /* User Confirmation Request */
529 events[6] |= 0x08; /* User Passkey Request */
530 events[6] |= 0x10; /* Remote OOB Data Request */
531 events[6] |= 0x20; /* Simple Pairing Complete */
532 events[7] |= 0x04; /* User Passkey Notification */
533 events[7] |= 0x08; /* Keypress Notification */
534 events[7] |= 0x10; /* Remote Host Supported
535 * Features Notification */
536 }
537
538 if (hdev->features[4] & LMP_LE)
539 events[7] |= 0x20; /* LE Meta-Event */
540
541 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
542 }
543
544 static void hci_set_le_support(struct hci_dev *hdev)
545 {
546 struct hci_cp_write_le_host_supported cp;
547
548 memset(&cp, 0, sizeof(cp));
549
550 if (enable_le) {
551 cp.le = 1;
552 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
553 }
554
555 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
556 }
557
/* Run the feature-dependent part of controller initialisation for BR/EDR
 * devices: event mask, SSP, inquiry mode, TX power, extended features
 * and LE host support, each gated on the relevant feature bits. */
static void hci_setup(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	hci_setup_event_mask(hdev);

	/* Read_Local_Supported_Commands is only valid on 1.2+ devices. */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		u8 mode = 0x01;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (hdev->features[7] & LMP_EXTFEATURES) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
							sizeof(cp), &cp);
	}

	if (hdev->features[4] & LMP_LE)
		hci_set_le_support(hdev);
}
590
/* Command Complete handler for HCI_Read_Local_Version_Information:
 * cache version/manufacturer data, then continue init if still in the
 * HCI_INIT phase. */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
					hdev->manufacturer,
					hdev->hci_ver, hdev->hci_rev);

	/* During initialisation, the version info gates the rest of the
	 * feature-dependent setup sequence. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);
}
613
/* Build the default link policy from the controller's feature bits and
 * send it with HCI_Write_Default_Link_Policy_Settings. */
static void hci_setup_link_policy(struct hci_dev *hdev)
{
	u16 link_policy = 0;

	if (hdev->features[0] & LMP_RSWITCH)
		link_policy |= HCI_LP_RSWITCH;
	if (hdev->features[0] & LMP_HOLD)
		link_policy |= HCI_LP_HOLD;
	if (hdev->features[0] & LMP_SNIFF)
		link_policy |= HCI_LP_SNIFF;
	if (hdev->features[1] & LMP_PARK)
		link_policy |= HCI_LP_PARK;

	/* Sent over the wire, so convert to little endian in place. */
	link_policy = cpu_to_le16(link_policy);
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					sizeof(link_policy), &link_policy);
}
631
/* Command Complete handler for HCI_Read_Local_Supported_Commands: cache
 * the supported-commands bitmap and, during init, set the default link
 * policy if Write_Default_Link_Policy (octet 5 bit 4) is supported. */
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
649
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches the feature page and widens the default ACL packet types and
 * (e)SCO packet types to everything the controller advertises.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	/* 3-slot EDR eSCO enables both 2 Mbps and 3 Mbps 5-slot types. */
	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
					hdev->features[0], hdev->features[1],
					hdev->features[2], hdev->features[3],
					hdev->features[4], hdev->features[5],
					hdev->features[6], hdev->features[7]);
}
704
/* Command Complete handler for HCI_Read_Local_Extended_Features: page 0
 * refreshes the LMP features, page 1 holds the host features. */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	switch (rp->page) {
	case 0:
		memcpy(hdev->features, rp->features, 8);
		break;
	case 1:
		memcpy(hdev->host_features, rp->features, 8);
		break;
	}

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
726
727 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
728 struct sk_buff *skb)
729 {
730 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
731
732 BT_DBG("%s status 0x%x", hdev->name, rp->status);
733
734 if (rp->status)
735 return;
736
737 hdev->flow_ctl_mode = rp->mode;
738
739 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
740 }
741
/* Command Complete handler for HCI_Read_Buffer_Size: record the
 * controller's ACL/SCO MTUs and packet counts, and initialise the
 * outstanding-packet counters from them. */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO buffer info; override with
	 * known-good values when the quirk is set. */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
					hdev->acl_mtu, hdev->acl_pkts,
					hdev->sco_mtu, hdev->sco_pkts);
}
768
769 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
770 {
771 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
772
773 BT_DBG("%s status 0x%x", hdev->name, rp->status);
774
775 if (!rp->status)
776 bacpy(&hdev->bdaddr, &rp->bdaddr);
777
778 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
779 }
780
/* Command Complete handler for HCI_Read_Data_Block_Size (block-based
 * flow control, used by AMP controllers). */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks start out available. */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
					hdev->block_cnt, hdev->block_len);

	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
}
802
803 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
804 {
805 __u8 status = *((__u8 *) skb->data);
806
807 BT_DBG("%s status 0x%x", hdev->name, status);
808
809 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
810 }
811
/* Command Complete handler for HCI_Read_Local_AMP_Info: cache the AMP
 * controller capabilities (bandwidth, latency, PDU and flush limits). */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
}
835
836 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
837 struct sk_buff *skb)
838 {
839 __u8 status = *((__u8 *) skb->data);
840
841 BT_DBG("%s status 0x%x", hdev->name, status);
842
843 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
844 }
845
846 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
847 {
848 __u8 status = *((__u8 *) skb->data);
849
850 BT_DBG("%s status 0x%x", hdev->name, status);
851
852 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
853 }
854
855 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
856 struct sk_buff *skb)
857 {
858 __u8 status = *((__u8 *) skb->data);
859
860 BT_DBG("%s status 0x%x", hdev->name, status);
861
862 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
863 }
864
865 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
866 struct sk_buff *skb)
867 {
868 __u8 status = *((__u8 *) skb->data);
869
870 BT_DBG("%s status 0x%x", hdev->name, status);
871
872 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
873 }
874
875 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
876 {
877 __u8 status = *((__u8 *) skb->data);
878
879 BT_DBG("%s status 0x%x", hdev->name, status);
880
881 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
882 }
883
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * mgmt is always notified; on success the PIN length from the command
 * we sent is stored on the connection (used later for key-type checks).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
911
/* Command Complete handler for HCI_PIN_Code_Request_Negative_Reply:
 * forward the result to the management interface. */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
926
/* Command Complete handler for HCI_LE_Read_Buffer_Size: record LE ACL
 * MTU and packet count, and initialise the LE flow-control counter. */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}
946
/* Command Complete handler for HCI_User_Confirmation_Request_Reply:
 * forward the result to the management interface. */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
961
/* Command Complete handler for
 * HCI_User_Confirmation_Request_Negative_Reply: forward to mgmt. */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
977
/* Command Complete handler for HCI_User_Passkey_Request_Reply: forward
 * the result to the management interface. */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
992
/* Command Complete handler for
 * HCI_User_Passkey_Request_Negative_Reply: forward to mgmt. */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
1008
/* Command Complete handler for HCI_Read_Local_OOB_Data: hand the hash
 * and randomizer (or the error status) to the management interface. */
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}
1021
1022 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1023 {
1024 __u8 status = *((__u8 *) skb->data);
1025
1026 BT_DBG("%s status 0x%x", hdev->name, status);
1027 }
1028
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * Tracks HCI_LE_SCAN and manages the cached advertising entries: they
 * are cleared when a new scan starts, and scheduled for expiry once
 * scanning stops.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* A fresh scan starts from an empty advertising cache,
		 * so stop any pending expiry first. */
		cancel_delayed_work_sync(&hdev->adv_work);

		hci_dev_lock(hdev);
		hci_adv_entries_clear(hdev);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* Keep the cached advertising entries around for a
		 * while after scanning stops, then clear them. */
		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1066
1067 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1068 {
1069 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1070
1071 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1072
1073 if (rp->status)
1074 return;
1075
1076 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1077 }
1078
1079 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1080 {
1081 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1082
1083 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1084
1085 if (rp->status)
1086 return;
1087
1088 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1089 }
1090
1091 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1092 struct sk_buff *skb)
1093 {
1094 struct hci_cp_read_local_ext_features cp;
1095 __u8 status = *((__u8 *) skb->data);
1096
1097 BT_DBG("%s status 0x%x", hdev->name, status);
1098
1099 if (status)
1100 return;
1101
1102 cp.page = 0x01;
1103 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1104 }
1105
/* Command Status handler for HCI_Inquiry.
 *
 * On failure the request is completed, deferred connections retried and
 * mgmt informed; on success the inquiry flag is set and discovery moves
 * to the INQUIRY state.
 */
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_INQUIRY);
	hci_dev_unlock(hdev);
}
1126
/* Command Status handler for HCI_Create_Connection.
 *
 * On failure: a pending connection is either retried (status 0x0c,
 * "Command Disallowed", for up to two attempts) or torn down.  On
 * success: if no hci_conn exists yet (the command was issued outside
 * the connection framework), create one marked outgoing/master.
 */
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1166
/* Command Status handler for HCI_Add_SCO_Connection.
 *
 * Only failures matter here: find the SCO link hanging off the parent
 * ACL connection and tear it down, notifying the protocol layer.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command carried the ACL handle the SCO link belongs to. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1201
1202 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1203 {
1204 struct hci_cp_auth_requested *cp;
1205 struct hci_conn *conn;
1206
1207 BT_DBG("%s status 0x%x", hdev->name, status);
1208
1209 if (!status)
1210 return;
1211
1212 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1213 if (!cp)
1214 return;
1215
1216 hci_dev_lock(hdev);
1217
1218 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1219 if (conn) {
1220 if (conn->state == BT_CONFIG) {
1221 hci_proto_connect_cfm(conn, status);
1222 hci_conn_put(conn);
1223 }
1224 }
1225
1226 hci_dev_unlock(hdev);
1227 }
1228
1229 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1230 {
1231 struct hci_cp_set_conn_encrypt *cp;
1232 struct hci_conn *conn;
1233
1234 BT_DBG("%s status 0x%x", hdev->name, status);
1235
1236 if (!status)
1237 return;
1238
1239 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1240 if (!cp)
1241 return;
1242
1243 hci_dev_lock(hdev);
1244
1245 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1246 if (conn) {
1247 if (conn->state == BT_CONFIG) {
1248 hci_proto_connect_cfm(conn, status);
1249 hci_conn_put(conn);
1250 }
1251 }
1252
1253 hci_dev_unlock(hdev);
1254 }
1255
1256 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1257 struct hci_conn *conn)
1258 {
1259 if (conn->state != BT_CONFIG || !conn->out)
1260 return 0;
1261
1262 if (conn->pending_sec_level == BT_SECURITY_SDP)
1263 return 0;
1264
1265 /* Only request authentication for SSP connections or non-SSP
1266 * devices with sec_level HIGH or if MITM protection is requested */
1267 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1268 conn->pending_sec_level != BT_SECURITY_HIGH &&
1269 !(conn->auth_type & 0x01))
1270 return 0;
1271
1272 return 1;
1273 }
1274
1275 static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e)
1276 {
1277 struct hci_cp_remote_name_req cp;
1278
1279 memset(&cp, 0, sizeof(cp));
1280
1281 bacpy(&cp.bdaddr, &e->data.bdaddr);
1282 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1283 cp.pscan_mode = e->data.pscan_mode;
1284 cp.clock_offset = e->data.clock_offset;
1285
1286 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1287 }
1288
1289 static void hci_resolve_next_name(struct hci_dev *hdev, bdaddr_t *bdaddr)
1290 {
1291 struct discovery_state *discov = &hdev->discovery;
1292 struct inquiry_entry *e;
1293
1294 if (discov->state == DISCOVERY_STOPPING)
1295 goto discov_complete;
1296
1297 if (discov->state != DISCOVERY_RESOLVING)
1298 return;
1299
1300 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1301 if (e) {
1302 e->name_state = NAME_KNOWN;
1303 list_del(&e->list);
1304 }
1305
1306 if (list_empty(&discov->resolve))
1307 goto discov_complete;
1308
1309 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1310 if (hci_resolve_name(hdev, e) == 0) {
1311 e->name_state = NAME_PENDING;
1312 return;
1313 }
1314
1315 discov_complete:
1316 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1317 }
1318
1319 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1320 {
1321 struct hci_cp_remote_name_req *cp;
1322 struct hci_conn *conn;
1323
1324 BT_DBG("%s status 0x%x", hdev->name, status);
1325
1326 /* If successful wait for the name req complete event before
1327 * checking for the need to do authentication */
1328 if (!status)
1329 return;
1330
1331 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1332 if (!cp)
1333 return;
1334
1335 hci_dev_lock(hdev);
1336
1337 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1338 hci_resolve_next_name(hdev, &cp->bdaddr);
1339
1340 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1341 if (!conn)
1342 goto unlock;
1343
1344 if (!hci_outgoing_auth_needed(hdev, conn))
1345 goto unlock;
1346
1347 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1348 struct hci_cp_auth_requested cp;
1349 cp.handle = __cpu_to_le16(conn->handle);
1350 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1351 }
1352
1353 unlock:
1354 hci_dev_unlock(hdev);
1355 }
1356
1357 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1358 {
1359 struct hci_cp_read_remote_features *cp;
1360 struct hci_conn *conn;
1361
1362 BT_DBG("%s status 0x%x", hdev->name, status);
1363
1364 if (!status)
1365 return;
1366
1367 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1368 if (!cp)
1369 return;
1370
1371 hci_dev_lock(hdev);
1372
1373 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1374 if (conn) {
1375 if (conn->state == BT_CONFIG) {
1376 hci_proto_connect_cfm(conn, status);
1377 hci_conn_put(conn);
1378 }
1379 }
1380
1381 hci_dev_unlock(hdev);
1382 }
1383
1384 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1385 {
1386 struct hci_cp_read_remote_ext_features *cp;
1387 struct hci_conn *conn;
1388
1389 BT_DBG("%s status 0x%x", hdev->name, status);
1390
1391 if (!status)
1392 return;
1393
1394 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1395 if (!cp)
1396 return;
1397
1398 hci_dev_lock(hdev);
1399
1400 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1401 if (conn) {
1402 if (conn->state == BT_CONFIG) {
1403 hci_proto_connect_cfm(conn, status);
1404 hci_conn_put(conn);
1405 }
1406 }
1407
1408 hci_dev_unlock(hdev);
1409 }
1410
1411 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1412 {
1413 struct hci_cp_setup_sync_conn *cp;
1414 struct hci_conn *acl, *sco;
1415 __u16 handle;
1416
1417 BT_DBG("%s status 0x%x", hdev->name, status);
1418
1419 if (!status)
1420 return;
1421
1422 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1423 if (!cp)
1424 return;
1425
1426 handle = __le16_to_cpu(cp->handle);
1427
1428 BT_DBG("%s handle %d", hdev->name, handle);
1429
1430 hci_dev_lock(hdev);
1431
1432 acl = hci_conn_hash_lookup_handle(hdev, handle);
1433 if (acl) {
1434 sco = acl->link;
1435 if (sco) {
1436 sco->state = BT_CLOSED;
1437
1438 hci_proto_connect_cfm(sco, status);
1439 hci_conn_del(sco);
1440 }
1441 }
1442
1443 hci_dev_unlock(hdev);
1444 }
1445
1446 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1447 {
1448 struct hci_cp_sniff_mode *cp;
1449 struct hci_conn *conn;
1450
1451 BT_DBG("%s status 0x%x", hdev->name, status);
1452
1453 if (!status)
1454 return;
1455
1456 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1457 if (!cp)
1458 return;
1459
1460 hci_dev_lock(hdev);
1461
1462 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1463 if (conn) {
1464 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1465
1466 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1467 hci_sco_setup(conn, status);
1468 }
1469
1470 hci_dev_unlock(hdev);
1471 }
1472
1473 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1474 {
1475 struct hci_cp_exit_sniff_mode *cp;
1476 struct hci_conn *conn;
1477
1478 BT_DBG("%s status 0x%x", hdev->name, status);
1479
1480 if (!status)
1481 return;
1482
1483 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1484 if (!cp)
1485 return;
1486
1487 hci_dev_lock(hdev);
1488
1489 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1490 if (conn) {
1491 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1492
1493 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1494 hci_sco_setup(conn, status);
1495 }
1496
1497 hci_dev_unlock(hdev);
1498 }
1499
1500 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1501 {
1502 struct hci_cp_le_create_conn *cp;
1503 struct hci_conn *conn;
1504
1505 BT_DBG("%s status 0x%x", hdev->name, status);
1506
1507 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1508 if (!cp)
1509 return;
1510
1511 hci_dev_lock(hdev);
1512
1513 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1514
1515 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1516 conn);
1517
1518 if (status) {
1519 if (conn && conn->state == BT_CONNECT) {
1520 conn->state = BT_CLOSED;
1521 hci_proto_connect_cfm(conn, status);
1522 hci_conn_del(conn);
1523 }
1524 } else {
1525 if (!conn) {
1526 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1527 if (conn) {
1528 conn->dst_type = cp->peer_addr_type;
1529 conn->out = true;
1530 } else {
1531 BT_ERR("No memory for new connection");
1532 }
1533 }
1534 }
1535
1536 hci_dev_unlock(hdev);
1537 }
1538
/* Command status handler for HCI_OP_LE_START_ENC.
 *
 * Nothing to do beyond tracing: the outcome of the encryption request
 * is delivered via the encryption change event.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}
1543
/* Inquiry Complete event handler.
 *
 * Completes the inquiry request and, when mgmt-driven discovery is
 * active, either starts resolving names of the discovered devices or
 * marks discovery as stopped.
 */
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status %d", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	/* Let any connection attempts deferred during inquiry proceed. */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Discovery state only matters for mgmt-driven discovery. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_INQUIRY)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution; stop discovery if no remote name
	 * request could be sent. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1583
1584 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1585 {
1586 struct inquiry_data data;
1587 struct inquiry_info *info = (void *) (skb->data + 1);
1588 int num_rsp = *((__u8 *) skb->data);
1589
1590 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1591
1592 if (!num_rsp)
1593 return;
1594
1595 hci_dev_lock(hdev);
1596
1597 for (; num_rsp; num_rsp--, info++) {
1598 bool name_known;
1599
1600 bacpy(&data.bdaddr, &info->bdaddr);
1601 data.pscan_rep_mode = info->pscan_rep_mode;
1602 data.pscan_period_mode = info->pscan_period_mode;
1603 data.pscan_mode = info->pscan_mode;
1604 memcpy(data.dev_class, info->dev_class, 3);
1605 data.clock_offset = info->clock_offset;
1606 data.rssi = 0x00;
1607 data.ssp_mode = 0x00;
1608
1609 name_known = hci_inquiry_cache_update(hdev, &data, false);
1610 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1611 info->dev_class, 0, !name_known,
1612 NULL, 0);
1613 }
1614
1615 hci_dev_unlock(hdev);
1616 }
1617
/* Connection Complete event handler.
 *
 * Locates the matching connection (re-typing an eSCO attempt to SCO if
 * the controller fell back) and either finishes setup on success —
 * storing the handle, updating link mode, starting remote feature
 * discovery for ACL links — or tears the connection down on failure.
 */
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may belong to a connection that was
		 * requested as eSCO; retry the lookup with that type. */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			/* ACL links stay in BT_CONFIG until remote
			 * features (and possibly authentication) are
			 * sorted out. */
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
							conn->dst_type);
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
							sizeof(cp), &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	/* Retry any connection attempts deferred while this one ran. */
	hci_conn_check_pending(hdev);
}
1697
/* Connection Request event handler.
 *
 * Accepts or rejects an incoming connection based on the device link
 * mode, the registered protocols and the blacklist. Accepted ACL (or
 * non-eSCO-capable) requests use Accept Connection Request; eSCO
 * requests use Accept Synchronous Connection Request with default
 * bandwidth/latency parameters.
 */
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
					batostr(&ev->bdaddr), ev->link_type);

	/* Let the protocols veto or extend the accept decision. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we have seen this
		 * device during inquiry. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
							sizeof(cp), &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* Default eSCO parameters: 8000 B/s both ways,
			 * no latency limit, voice setting from hdev. */
			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1771
1772 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1773 {
1774 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1775 struct hci_conn *conn;
1776
1777 BT_DBG("%s status %d", hdev->name, ev->status);
1778
1779 hci_dev_lock(hdev);
1780
1781 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1782 if (!conn)
1783 goto unlock;
1784
1785 if (ev->status == 0)
1786 conn->state = BT_CLOSED;
1787
1788 if (conn->type == ACL_LINK || conn->type == LE_LINK) {
1789 if (ev->status != 0)
1790 mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
1791 else
1792 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1793 conn->dst_type);
1794 }
1795
1796 if (ev->status == 0) {
1797 hci_proto_disconn_cfm(conn, ev->reason);
1798 hci_conn_del(conn);
1799 }
1800
1801 unlock:
1802 hci_dev_unlock(hdev);
1803 }
1804
/* Authentication Complete event handler.
 *
 * On success, records the new security level (unless this was an
 * impossible re-authentication of a legacy device); on failure,
 * notifies mgmt. For connections still in the configuration phase an
 * SSP link continues with encryption setup, otherwise setup is
 * concluded. A pending encryption request is issued or failed last.
 */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated, so
		 * a "successful" re-auth must not upgrade the security
		 * level. */
		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
				test_bit(HCI_CONN_REAUTH_PEND,	&conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
			/* SSP requires encryption before the connection
			 * setup is complete. */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Balance the hold taken when authentication started. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1869
1870 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1871 {
1872 struct hci_ev_remote_name *ev = (void *) skb->data;
1873 struct hci_conn *conn;
1874
1875 BT_DBG("%s", hdev->name);
1876
1877 hci_conn_check_pending(hdev);
1878
1879 hci_dev_lock(hdev);
1880
1881 if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
1882 if (ev->status == 0)
1883 mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
1884
1885 hci_resolve_next_name(hdev, &ev->bdaddr);
1886 }
1887
1888 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1889 if (!conn)
1890 goto unlock;
1891
1892 if (!hci_outgoing_auth_needed(hdev, conn))
1893 goto unlock;
1894
1895 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1896 struct hci_cp_auth_requested cp;
1897 cp.handle = __cpu_to_le16(conn->handle);
1898 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1899 }
1900
1901 unlock:
1902 hci_dev_unlock(hdev);
1903 }
1904
1905 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1906 {
1907 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1908 struct hci_conn *conn;
1909
1910 BT_DBG("%s status %d", hdev->name, ev->status);
1911
1912 hci_dev_lock(hdev);
1913
1914 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1915 if (conn) {
1916 if (!ev->status) {
1917 if (ev->encrypt) {
1918 /* Encryption implies authentication */
1919 conn->link_mode |= HCI_LM_AUTH;
1920 conn->link_mode |= HCI_LM_ENCRYPT;
1921 conn->sec_level = conn->pending_sec_level;
1922 } else
1923 conn->link_mode &= ~HCI_LM_ENCRYPT;
1924 }
1925
1926 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1927
1928 if (conn->state == BT_CONFIG) {
1929 if (!ev->status)
1930 conn->state = BT_CONNECTED;
1931
1932 hci_proto_connect_cfm(conn, ev->status);
1933 hci_conn_put(conn);
1934 } else
1935 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1936 }
1937
1938 hci_dev_unlock(hdev);
1939 }
1940
1941 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1942 {
1943 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1944 struct hci_conn *conn;
1945
1946 BT_DBG("%s status %d", hdev->name, ev->status);
1947
1948 hci_dev_lock(hdev);
1949
1950 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1951 if (conn) {
1952 if (!ev->status)
1953 conn->link_mode |= HCI_LM_SECURE;
1954
1955 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1956
1957 hci_key_change_cfm(conn, ev->status);
1958 }
1959
1960 hci_dev_unlock(hdev);
1961 }
1962
/* Read Remote Supported Features Complete event handler.
 *
 * Stores the remote feature mask and, while the connection is still in
 * the configuration phase, continues setup: reading extended features
 * for SSP-capable peers, otherwise requesting the remote name and, if
 * no authentication is needed, completing the connection.
 */
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	/* Everything below only applies during connection setup. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* When both sides support SSP, page 1 of the extended features
	 * must be read before setup continues. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2008
/* Read Remote Version Information Complete event: currently only
 * traced; the version data is not stored. */
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2013
/* QoS Setup Complete event: currently only traced; the QoS parameters
 * are not acted upon. */
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2018
2019 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2020 {
2021 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2022 __u16 opcode;
2023
2024 skb_pull(skb, sizeof(*ev));
2025
2026 opcode = __le16_to_cpu(ev->opcode);
2027
2028 switch (opcode) {
2029 case HCI_OP_INQUIRY_CANCEL:
2030 hci_cc_inquiry_cancel(hdev, skb);
2031 break;
2032
2033 case HCI_OP_EXIT_PERIODIC_INQ:
2034 hci_cc_exit_periodic_inq(hdev, skb);
2035 break;
2036
2037 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2038 hci_cc_remote_name_req_cancel(hdev, skb);
2039 break;
2040
2041 case HCI_OP_ROLE_DISCOVERY:
2042 hci_cc_role_discovery(hdev, skb);
2043 break;
2044
2045 case HCI_OP_READ_LINK_POLICY:
2046 hci_cc_read_link_policy(hdev, skb);
2047 break;
2048
2049 case HCI_OP_WRITE_LINK_POLICY:
2050 hci_cc_write_link_policy(hdev, skb);
2051 break;
2052
2053 case HCI_OP_READ_DEF_LINK_POLICY:
2054 hci_cc_read_def_link_policy(hdev, skb);
2055 break;
2056
2057 case HCI_OP_WRITE_DEF_LINK_POLICY:
2058 hci_cc_write_def_link_policy(hdev, skb);
2059 break;
2060
2061 case HCI_OP_RESET:
2062 hci_cc_reset(hdev, skb);
2063 break;
2064
2065 case HCI_OP_WRITE_LOCAL_NAME:
2066 hci_cc_write_local_name(hdev, skb);
2067 break;
2068
2069 case HCI_OP_READ_LOCAL_NAME:
2070 hci_cc_read_local_name(hdev, skb);
2071 break;
2072
2073 case HCI_OP_WRITE_AUTH_ENABLE:
2074 hci_cc_write_auth_enable(hdev, skb);
2075 break;
2076
2077 case HCI_OP_WRITE_ENCRYPT_MODE:
2078 hci_cc_write_encrypt_mode(hdev, skb);
2079 break;
2080
2081 case HCI_OP_WRITE_SCAN_ENABLE:
2082 hci_cc_write_scan_enable(hdev, skb);
2083 break;
2084
2085 case HCI_OP_READ_CLASS_OF_DEV:
2086 hci_cc_read_class_of_dev(hdev, skb);
2087 break;
2088
2089 case HCI_OP_WRITE_CLASS_OF_DEV:
2090 hci_cc_write_class_of_dev(hdev, skb);
2091 break;
2092
2093 case HCI_OP_READ_VOICE_SETTING:
2094 hci_cc_read_voice_setting(hdev, skb);
2095 break;
2096
2097 case HCI_OP_WRITE_VOICE_SETTING:
2098 hci_cc_write_voice_setting(hdev, skb);
2099 break;
2100
2101 case HCI_OP_HOST_BUFFER_SIZE:
2102 hci_cc_host_buffer_size(hdev, skb);
2103 break;
2104
2105 case HCI_OP_READ_SSP_MODE:
2106 hci_cc_read_ssp_mode(hdev, skb);
2107 break;
2108
2109 case HCI_OP_WRITE_SSP_MODE:
2110 hci_cc_write_ssp_mode(hdev, skb);
2111 break;
2112
2113 case HCI_OP_READ_LOCAL_VERSION:
2114 hci_cc_read_local_version(hdev, skb);
2115 break;
2116
2117 case HCI_OP_READ_LOCAL_COMMANDS:
2118 hci_cc_read_local_commands(hdev, skb);
2119 break;
2120
2121 case HCI_OP_READ_LOCAL_FEATURES:
2122 hci_cc_read_local_features(hdev, skb);
2123 break;
2124
2125 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2126 hci_cc_read_local_ext_features(hdev, skb);
2127 break;
2128
2129 case HCI_OP_READ_BUFFER_SIZE:
2130 hci_cc_read_buffer_size(hdev, skb);
2131 break;
2132
2133 case HCI_OP_READ_BD_ADDR:
2134 hci_cc_read_bd_addr(hdev, skb);
2135 break;
2136
2137 case HCI_OP_READ_DATA_BLOCK_SIZE:
2138 hci_cc_read_data_block_size(hdev, skb);
2139 break;
2140
2141 case HCI_OP_WRITE_CA_TIMEOUT:
2142 hci_cc_write_ca_timeout(hdev, skb);
2143 break;
2144
2145 case HCI_OP_READ_FLOW_CONTROL_MODE:
2146 hci_cc_read_flow_control_mode(hdev, skb);
2147 break;
2148
2149 case HCI_OP_READ_LOCAL_AMP_INFO:
2150 hci_cc_read_local_amp_info(hdev, skb);
2151 break;
2152
2153 case HCI_OP_DELETE_STORED_LINK_KEY:
2154 hci_cc_delete_stored_link_key(hdev, skb);
2155 break;
2156
2157 case HCI_OP_SET_EVENT_MASK:
2158 hci_cc_set_event_mask(hdev, skb);
2159 break;
2160
2161 case HCI_OP_WRITE_INQUIRY_MODE:
2162 hci_cc_write_inquiry_mode(hdev, skb);
2163 break;
2164
2165 case HCI_OP_READ_INQ_RSP_TX_POWER:
2166 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2167 break;
2168
2169 case HCI_OP_SET_EVENT_FLT:
2170 hci_cc_set_event_flt(hdev, skb);
2171 break;
2172
2173 case HCI_OP_PIN_CODE_REPLY:
2174 hci_cc_pin_code_reply(hdev, skb);
2175 break;
2176
2177 case HCI_OP_PIN_CODE_NEG_REPLY:
2178 hci_cc_pin_code_neg_reply(hdev, skb);
2179 break;
2180
2181 case HCI_OP_READ_LOCAL_OOB_DATA:
2182 hci_cc_read_local_oob_data_reply(hdev, skb);
2183 break;
2184
2185 case HCI_OP_LE_READ_BUFFER_SIZE:
2186 hci_cc_le_read_buffer_size(hdev, skb);
2187 break;
2188
2189 case HCI_OP_USER_CONFIRM_REPLY:
2190 hci_cc_user_confirm_reply(hdev, skb);
2191 break;
2192
2193 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2194 hci_cc_user_confirm_neg_reply(hdev, skb);
2195 break;
2196
2197 case HCI_OP_USER_PASSKEY_REPLY:
2198 hci_cc_user_passkey_reply(hdev, skb);
2199 break;
2200
2201 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2202 hci_cc_user_passkey_neg_reply(hdev, skb);
2203
2204 case HCI_OP_LE_SET_SCAN_PARAM:
2205 hci_cc_le_set_scan_param(hdev, skb);
2206 break;
2207
2208 case HCI_OP_LE_SET_SCAN_ENABLE:
2209 hci_cc_le_set_scan_enable(hdev, skb);
2210 break;
2211
2212 case HCI_OP_LE_LTK_REPLY:
2213 hci_cc_le_ltk_reply(hdev, skb);
2214 break;
2215
2216 case HCI_OP_LE_LTK_NEG_REPLY:
2217 hci_cc_le_ltk_neg_reply(hdev, skb);
2218 break;
2219
2220 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2221 hci_cc_write_le_host_supported(hdev, skb);
2222 break;
2223
2224 default:
2225 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2226 break;
2227 }
2228
2229 if (ev->opcode != HCI_OP_NOP)
2230 del_timer(&hdev->cmd_timer);
2231
2232 if (ev->ncmd) {
2233 atomic_set(&hdev->cmd_cnt, 1);
2234 if (!skb_queue_empty(&hdev->cmd_q))
2235 queue_work(hdev->workqueue, &hdev->cmd_work);
2236 }
2237 }
2238
/* Command Status event handler.
 *
 * Dispatches the status to the per-opcode hci_cs_* handler, stops the
 * command timeout timer and, when the controller advertises free
 * command slots (and no reset is in progress), restarts the command
 * queue.
 */
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		/* Only a failed disconnect needs reporting; success is
		 * handled by the disconnection complete event. */
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, NULL, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged, so the command timeout no
	 * longer applies. */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2320
2321 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2322 {
2323 struct hci_ev_role_change *ev = (void *) skb->data;
2324 struct hci_conn *conn;
2325
2326 BT_DBG("%s status %d", hdev->name, ev->status);
2327
2328 hci_dev_lock(hdev);
2329
2330 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2331 if (conn) {
2332 if (!ev->status) {
2333 if (ev->role)
2334 conn->link_mode &= ~HCI_LM_MASTER;
2335 else
2336 conn->link_mode |= HCI_LM_MASTER;
2337 }
2338
2339 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2340
2341 hci_role_switch_cfm(conn, ev->status, ev->role);
2342 }
2343
2344 hci_dev_unlock(hdev);
2345 }
2346
/* Number Of Completed Packets event handler (packet-based flow
 * control).
 *
 * For each reported handle, credits the completed packets back to the
 * per-connection sent counter and the per-link-type device quota,
 * then restarts the TX work to push out queued data.
 */
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the handle count against the actual event length
	 * before touching the entries. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the right quota, clamping to the
		 * controller-advertised maximum. */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer share
			 * the ACL quota. */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2412
/* HCI Number of Completed Data Blocks event: block-based flow control
 * counterpart of Number of Completed Packets. The controller reports
 * how many data blocks it freed for each connection handle. */
static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful with block-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the skb really carries num_hndl handle entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
							ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
			/* Return the freed blocks, clamped to the total
			 * the controller advertised. */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Blocks are available again; kick the TX work queue */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2462
2463 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2464 {
2465 struct hci_ev_mode_change *ev = (void *) skb->data;
2466 struct hci_conn *conn;
2467
2468 BT_DBG("%s status %d", hdev->name, ev->status);
2469
2470 hci_dev_lock(hdev);
2471
2472 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2473 if (conn) {
2474 conn->mode = ev->mode;
2475 conn->interval = __le16_to_cpu(ev->interval);
2476
2477 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2478 if (conn->mode == HCI_CM_ACTIVE)
2479 conn->power_save = 1;
2480 else
2481 conn->power_save = 0;
2482 }
2483
2484 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2485 hci_sco_setup(conn, ev->status);
2486 }
2487
2488 hci_dev_unlock(hdev);
2489 }
2490
2491 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2492 {
2493 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2494 struct hci_conn *conn;
2495
2496 BT_DBG("%s", hdev->name);
2497
2498 hci_dev_lock(hdev);
2499
2500 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2501 if (!conn)
2502 goto unlock;
2503
2504 if (conn->state == BT_CONNECTED) {
2505 hci_conn_hold(conn);
2506 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2507 hci_conn_put(conn);
2508 }
2509
2510 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2511 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2512 sizeof(ev->bdaddr), &ev->bdaddr);
2513 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2514 u8 secure;
2515
2516 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2517 secure = 1;
2518 else
2519 secure = 0;
2520
2521 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2522 }
2523
2524 unlock:
2525 hci_dev_unlock(hdev);
2526 }
2527
2528 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2529 {
2530 struct hci_ev_link_key_req *ev = (void *) skb->data;
2531 struct hci_cp_link_key_reply cp;
2532 struct hci_conn *conn;
2533 struct link_key *key;
2534
2535 BT_DBG("%s", hdev->name);
2536
2537 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2538 return;
2539
2540 hci_dev_lock(hdev);
2541
2542 key = hci_find_link_key(hdev, &ev->bdaddr);
2543 if (!key) {
2544 BT_DBG("%s link key not found for %s", hdev->name,
2545 batostr(&ev->bdaddr));
2546 goto not_found;
2547 }
2548
2549 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2550 batostr(&ev->bdaddr));
2551
2552 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2553 key->type == HCI_LK_DEBUG_COMBINATION) {
2554 BT_DBG("%s ignoring debug key", hdev->name);
2555 goto not_found;
2556 }
2557
2558 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2559 if (conn) {
2560 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2561 conn->auth_type != 0xff &&
2562 (conn->auth_type & 0x01)) {
2563 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2564 goto not_found;
2565 }
2566
2567 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2568 conn->pending_sec_level == BT_SECURITY_HIGH) {
2569 BT_DBG("%s ignoring key unauthenticated for high \
2570 security", hdev->name);
2571 goto not_found;
2572 }
2573
2574 conn->key_type = key->type;
2575 conn->pin_length = key->pin_len;
2576 }
2577
2578 bacpy(&cp.bdaddr, &ev->bdaddr);
2579 memcpy(cp.link_key, key->val, 16);
2580
2581 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2582
2583 hci_dev_unlock(hdev);
2584
2585 return;
2586
2587 not_found:
2588 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2589 hci_dev_unlock(hdev);
2590 }
2591
/* HCI Link Key Notification event: the controller created or changed a
 * link key for a remote device. Store it when host-side key handling is
 * enabled. */
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the connection's
		 * original key type. */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	/* NOTE(review): conn may be NULL here; hci_add_link_key is
	 * presumably tolerant of that - confirm in its definition. */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
							ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2620
2621 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2622 {
2623 struct hci_ev_clock_offset *ev = (void *) skb->data;
2624 struct hci_conn *conn;
2625
2626 BT_DBG("%s status %d", hdev->name, ev->status);
2627
2628 hci_dev_lock(hdev);
2629
2630 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2631 if (conn && !ev->status) {
2632 struct inquiry_entry *ie;
2633
2634 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2635 if (ie) {
2636 ie->data.clock_offset = ev->clock_offset;
2637 ie->timestamp = jiffies;
2638 }
2639 }
2640
2641 hci_dev_unlock(hdev);
2642 }
2643
2644 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2645 {
2646 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2647 struct hci_conn *conn;
2648
2649 BT_DBG("%s status %d", hdev->name, ev->status);
2650
2651 hci_dev_lock(hdev);
2652
2653 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2654 if (conn && !ev->status)
2655 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2656
2657 hci_dev_unlock(hdev);
2658 }
2659
2660 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2661 {
2662 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2663 struct inquiry_entry *ie;
2664
2665 BT_DBG("%s", hdev->name);
2666
2667 hci_dev_lock(hdev);
2668
2669 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2670 if (ie) {
2671 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2672 ie->timestamp = jiffies;
2673 }
2674
2675 hci_dev_unlock(hdev);
2676 }
2677
/* HCI Inquiry Result with RSSI event: one or more discovered devices,
 * each carrying an RSSI value. Two wire formats exist (with and without
 * a pscan_mode field); they are told apart by the per-response size.
 * Each response updates the inquiry cache and is forwarded to mgmt. */
static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	/* Per-response size mismatch means the variant that additionally
	 * carries a pscan_mode field. */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			/* NOTE(review): name_known appears to indicate the
			 * cache already has this device's name; confirm in
			 * hci_inquiry_cache_update. */
			name_known = hci_inquiry_cache_update(hdev, &data,
								false);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						!name_known, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
								false);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						!name_known, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2733
/* HCI Read Remote Extended Features Complete event. Features page 0x01
 * carries the remote host's SSP support bit; during connection setup we
 * also continue with the remote name request and, if no outgoing
 * authentication is needed, complete the connection. */
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		/* Mirror the SSP bit into both the inquiry cache entry
		 * and the connection itself. */
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & 0x01);

		conn->ssp_mode = (ev->features[0] & 0x01);
	}

	/* The remainder only applies while the connection is still
	 * being configured. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	/* No outgoing authentication pending: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2777
/* HCI Synchronous Connection Complete event: a (e)SCO connection attempt
 * finished. On specific failure codes an outgoing eSCO attempt is retried
 * once with a restricted packet-type selection before giving up. */
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The controller may complete a pending eSCO attempt as a
		 * plain SCO link; match the eSCO connection and downgrade
		 * its type accordingly. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry an outgoing attempt once with a more conservative
		 * packet-type mask before treating it as failed. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2832
/* HCI Synchronous Connection Changed event: intentionally ignored; only
 * logged for debugging. */
static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2837
/* HCI Sniff Subrating event: no state is tracked here; the status is
 * only logged for debugging. */
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}
2844
/* HCI Extended Inquiry Result event: inquiry responses that carry EIR
 * data. If the EIR payload already contains the complete device name,
 * no separate remote name request is needed. */
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* With mgmt active, check whether the EIR data already
		 * includes the complete name. */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
							sizeof(info->data),
							EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					info->dev_class, info->rssi,
					!name_known, info->data,
					sizeof(info->data));
	}

	hci_dev_unlock(hdev);
}
2886
2887 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2888 {
2889 /* If remote requests dedicated bonding follow that lead */
2890 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2891 /* If both remote and local IO capabilities allow MITM
2892 * protection then require it, otherwise don't */
2893 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2894 return 0x02;
2895 else
2896 return 0x03;
2897 }
2898
2899 /* If remote requests no-bonding follow that lead */
2900 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2901 return conn->remote_auth | (conn->auth_type & 0x01);
2902
2903 return conn->auth_type;
2904 }
2905
2906 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2907 {
2908 struct hci_ev_io_capa_request *ev = (void *) skb->data;
2909 struct hci_conn *conn;
2910
2911 BT_DBG("%s", hdev->name);
2912
2913 hci_dev_lock(hdev);
2914
2915 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2916 if (!conn)
2917 goto unlock;
2918
2919 hci_conn_hold(conn);
2920
2921 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2922 goto unlock;
2923
2924 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
2925 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2926 struct hci_cp_io_capability_reply cp;
2927
2928 bacpy(&cp.bdaddr, &ev->bdaddr);
2929 cp.capability = conn->io_capability;
2930 conn->auth_type = hci_get_auth_req(conn);
2931 cp.authentication = conn->auth_type;
2932
2933 if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2934 hci_find_remote_oob_data(hdev, &conn->dst))
2935 cp.oob_data = 0x01;
2936 else
2937 cp.oob_data = 0x00;
2938
2939 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2940 sizeof(cp), &cp);
2941 } else {
2942 struct hci_cp_io_capability_neg_reply cp;
2943
2944 bacpy(&cp.bdaddr, &ev->bdaddr);
2945 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
2946
2947 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2948 sizeof(cp), &cp);
2949 }
2950
2951 unlock:
2952 hci_dev_unlock(hdev);
2953 }
2954
2955 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2956 {
2957 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2958 struct hci_conn *conn;
2959
2960 BT_DBG("%s", hdev->name);
2961
2962 hci_dev_lock(hdev);
2963
2964 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2965 if (!conn)
2966 goto unlock;
2967
2968 conn->remote_cap = ev->capability;
2969 conn->remote_oob = ev->oob_data;
2970 conn->remote_auth = ev->authentication;
2971
2972 unlock:
2973 hci_dev_unlock(hdev);
2974 }
2975
/* HCI User Confirmation Request event (SSP numeric comparison): decide
 * whether to auto-accept, delay, reject, or defer the confirmation to
 * user space via the management interface. */
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* MITM requirement bit of each side's auth requirements */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
				(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		/* Optionally delay the auto-accept to give user space a
		 * window to intervene. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
							confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3043
/* HCI User Passkey Request event: forward the passkey entry request to
 * user space via the management interface. */
static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_request(hdev, &ev->bdaddr);

	hci_dev_unlock(hdev);
}
3058
/* HCI Simple Pairing Complete event: report SSP failures to user space
 * unless a regular auth-complete event will already do so. */
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
		mgmt_auth_failed(hdev, &conn->dst, ev->status);

	/* Drop the reference taken when pairing started */
	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3085
3086 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3087 {
3088 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3089 struct inquiry_entry *ie;
3090
3091 BT_DBG("%s", hdev->name);
3092
3093 hci_dev_lock(hdev);
3094
3095 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3096 if (ie)
3097 ie->data.ssp_mode = (ev->features[0] & 0x01);
3098
3099 hci_dev_unlock(hdev);
3100 }
3101
/* HCI Remote OOB Data Request event: the controller asks for the OOB
 * hash and randomizer previously received out-of-band for this device.
 * Reply with the stored data, or send a negative reply if none exists. */
static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
									&cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
									&cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3136
/* LE Connection Complete meta event: finish setting up an LE connection,
 * creating the hci_conn on the fly for remotely initiated connections,
 * and report success or failure to the management interface. */
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		/* No pending outgoing connection - this is an incoming
		 * connection, so allocate a new hci_conn for it. */
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	mgmt_device_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3181
/* LE Advertising Report meta event: walk the variable-length report
 * entries, cache each advertiser, and forward it to the management
 * interface as a found device. */
static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];
	s8 rssi;

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		hci_add_adv_entry(hdev, ev);

		/* The RSSI byte immediately follows the advertising data */
		rssi = ev->data[ev->length];
		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
					NULL, rssi, 0, ev->data, ev->length);

		/* Advance past header, data and the trailing RSSI byte */
		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}
3205
3206 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3207 struct sk_buff *skb)
3208 {
3209 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3210 struct hci_cp_le_ltk_reply cp;
3211 struct hci_cp_le_ltk_neg_reply neg;
3212 struct hci_conn *conn;
3213 struct link_key *ltk;
3214
3215 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3216
3217 hci_dev_lock(hdev);
3218
3219 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3220 if (conn == NULL)
3221 goto not_found;
3222
3223 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3224 if (ltk == NULL)
3225 goto not_found;
3226
3227 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3228 cp.handle = cpu_to_le16(conn->handle);
3229 conn->pin_length = ltk->pin_len;
3230
3231 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3232
3233 hci_dev_unlock(hdev);
3234
3235 return;
3236
3237 not_found:
3238 neg.handle = ev->handle;
3239 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3240 hci_dev_unlock(hdev);
3241 }
3242
/* LE meta event demultiplexer: strip the meta header and dispatch on the
 * subevent code. Unknown subevents are silently ignored. */
static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	default:
		break;
	}
}
3266
/* Main HCI event dispatcher: strip the event header, route the packet to
 * the handler matching its event code, then free the skb and bump the RX
 * event statistics. Unknown events are only logged. */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
3435
/* Generate internal stack event: build a synthetic HCI_EV_STACK_INTERNAL
 * event of the given type carrying dlen bytes of data, and deliver it to
 * listening HCI sockets. It is never sent to the controller. */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so socket filters treat it like a received
	 * event, and timestamp it for SO_TIMESTAMP consumers. */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb, NULL);
	kfree_skb(skb);
}
3463
3464 module_param(enable_le, bool, 0644);
3465 MODULE_PARM_DESC(enable_le, "Enable LE support");