Bluetooth: Move mgmt related flags from hdev->flags to hdev->dev_flags
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 static bool enable_le;
49
50 /* Handle HCI Event packets */
51
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On failure, reports the failed stop-discovery attempt to the mgmt
 * layer. On success, clears the local HCI_INQUIRY flag, moves the
 * discovery state machine to DISCOVERY_STOPPED, completes the pending
 * HCI request and resumes any connection attempts that were deferred
 * while the inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		/* Notify mgmt under the dev lock and leave discovery
		 * state untouched. */
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	/* Inquiry may have held back outgoing connections; retry them. */
	hci_conn_check_pending(hdev);
}
75
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%x", hdev->name, status);
81
82 if (status)
83 return;
84
85 hci_conn_check_pending(hdev);
86 }
87
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * No local state to update; logged for debugging only. */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157 }
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
/* Command Complete handler for HCI_OP_WRITE_DEF_LINK_POLICY.
 *
 * On success, mirrors the policy value taken from the queued command
 * into hdev->link_policy. The pending request is completed regardless
 * of status so waiters are woken either way.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
187
/* Command Complete handler for HCI_OP_RESET.
 *
 * Clears the in-progress HCI_RESET flag, completes the pending reset
 * request, and wipes hdev->dev_flags so that all mgmt-related runtime
 * flags are back to their post-reset defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset invalidates all runtime dev_flags state. */
	hdev->dev_flags = 0;
}
200
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * Notifies the mgmt layer (success or failure) when mgmt is in use,
 * and on success caches the name we sent in hdev->dev_name. Both are
 * done under the dev lock.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* mgmt gets told about the outcome even on failure. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);

	if (status == 0)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
222
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 struct hci_rp_read_local_name *rp = (void *) skb->data;
226
227 BT_DBG("%s status 0x%x", hdev->name, rp->status);
228
229 if (rp->status)
230 return;
231
232 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
233 }
234
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, mirrors the authentication parameter we sent into the
 * HCI_AUTH device flag. The pending request is completed regardless
 * of status.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
257
258 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
259 {
260 __u8 status = *((__u8 *) skb->data);
261 void *sent;
262
263 BT_DBG("%s status 0x%x", hdev->name, status);
264
265 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
266 if (!sent)
267 return;
268
269 if (!status) {
270 __u8 param = *((__u8 *) sent);
271
272 if (param)
273 set_bit(HCI_ENCRYPT, &hdev->flags);
274 else
275 clear_bit(HCI_ENCRYPT, &hdev->flags);
276 }
277
278 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
279 }
280
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Synchronizes the HCI_ISCAN/HCI_PSCAN flags with the scan parameter
 * we sent and reports discoverable/connectable transitions to mgmt.
 * On failure, mgmt is told the write failed and any pending
 * discoverable timeout is dropped.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status != 0) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Snapshot and clear both scan bits; they are re-set below
	 * according to the parameter that was actually written. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		/* Only signal mgmt on an actual state transition. */
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Arm the timer that turns discoverable mode back off. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
									to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
329
330 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
331 {
332 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
333
334 BT_DBG("%s status 0x%x", hdev->name, rp->status);
335
336 if (rp->status)
337 return;
338
339 memcpy(hdev->dev_class, rp->dev_class, 3);
340
341 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
342 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
343 }
344
345 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
346 {
347 __u8 status = *((__u8 *) skb->data);
348 void *sent;
349
350 BT_DBG("%s status 0x%x", hdev->name, status);
351
352 if (status)
353 return;
354
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
356 if (!sent)
357 return;
358
359 memcpy(hdev->dev_class, sent, 3);
360 }
361
362 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
363 {
364 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
365 __u16 setting;
366
367 BT_DBG("%s status 0x%x", hdev->name, rp->status);
368
369 if (rp->status)
370 return;
371
372 setting = __le16_to_cpu(rp->voice_setting);
373
374 if (hdev->voice_setting == setting)
375 return;
376
377 hdev->voice_setting = setting;
378
379 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
380
381 if (hdev->notify)
382 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
383 }
384
385 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
386 {
387 __u8 status = *((__u8 *) skb->data);
388 __u16 setting;
389 void *sent;
390
391 BT_DBG("%s status 0x%x", hdev->name, status);
392
393 if (status)
394 return;
395
396 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
397 if (!sent)
398 return;
399
400 setting = get_unaligned_le16(sent);
401
402 if (hdev->voice_setting == setting)
403 return;
404
405 hdev->voice_setting = setting;
406
407 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
408
409 if (hdev->notify)
410 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
411 }
412
413 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
414 {
415 __u8 status = *((__u8 *) skb->data);
416
417 BT_DBG("%s status 0x%x", hdev->name, status);
418
419 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
420 }
421
422 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
423 {
424 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->ssp_mode = rp->mode;
432 }
433
434 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
435 {
436 __u8 status = *((__u8 *) skb->data);
437 void *sent;
438
439 BT_DBG("%s status 0x%x", hdev->name, status);
440
441 if (status)
442 return;
443
444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
445 if (!sent)
446 return;
447
448 hdev->ssp_mode = *((__u8 *) sent);
449 }
450
/* Pick the best inquiry result mode the controller supports:
 * 2 = inquiry result with extended inquiry response,
 * 1 = inquiry result with RSSI,
 * 0 = standard inquiry result.
 *
 * Besides the advertised LMP feature bits, a few controllers that do
 * not set LMP_RSSI_INQ are known to support RSSI results anyway; they
 * are matched below by manufacturer/hci_rev/lmp_subver.
 * NOTE(review): the specific manufacturer IDs (11, 15, 31) look like
 * vendor quirks for particular dongle firmware revisions — confirm
 * against the Bluetooth SIG company identifier list before extending.
 */
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (hdev->features[6] & LMP_EXT_INQ)
		return 2;

	if (hdev->features[3] & LMP_RSSI_INQ)
		return 1;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
						hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
						hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}
478
479 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
480 {
481 u8 mode;
482
483 mode = hci_get_inquiry_mode(hdev);
484
485 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
486 }
487
/* Build and send the HCI Set Event Mask for this controller, enabling
 * only the events the advertised LMP features make possible. */
static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	events[4] |= 0x01; /* Flow Specification Complete */
	events[4] |= 0x02; /* Inquiry Result with RSSI */
	events[4] |= 0x04; /* Read Remote Extended Features Complete */
	events[5] |= 0x08; /* Synchronous Connection Complete */
	events[5] |= 0x10; /* Synchronous Connection Changed */

	if (hdev->features[3] & LMP_RSSI_INQ)
		events[4] |= 0x04; /* Read Remote Extended Features Complete.
				    * NOTE(review): redundant — this bit is
				    * already set unconditionally above, and
				    * the RSSI inquiry bit (0x02) is too. */

	if (hdev->features[5] & LMP_SNIFF_SUBR)
		events[5] |= 0x20; /* Sniff Subrating */

	if (hdev->features[5] & LMP_PAUSE_ENC)
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (hdev->features[6] & LMP_EXT_INQ)
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (hdev->features[6] & LMP_NO_FLUSH)
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (hdev->features[7] & LMP_LSTO)
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (hdev->features[4] & LMP_LE)
		events[7] |= 0x20; /* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
542
/* Send HCI Write LE Host Supported according to the module-level
 * enable_le parameter. With LE disabled, a zeroed parameter block is
 * still written so the controller state matches the host policy. */
static void hci_set_le_support(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (enable_le) {
		cp.le = 1;
		/* Simultaneous LE + BR/EDR only if the controller
		 * advertises it. */
		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
	}

	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
}
556
/* Feature-dependent controller initialization, run once the local
 * version information is known (called from hci_cc_read_local_version
 * while HCI_INIT is set). Only applies to BR/EDR controllers. */
static void hci_setup(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	hci_setup_event_mask(hdev);

	/* Read Local Supported Commands is only present from 1.2 on. */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		u8 mode = 0x01;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	/* Fetch extended feature page 1 (host features) if available. */
	if (hdev->features[7] & LMP_EXTFEATURES) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
							sizeof(cp), &cp);
	}

	if (hdev->features[4] & LMP_LE)
		hci_set_le_support(hdev);
}
589
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 {
592 struct hci_rp_read_local_version *rp = (void *) skb->data;
593
594 BT_DBG("%s status 0x%x", hdev->name, rp->status);
595
596 if (rp->status)
597 return;
598
599 hdev->hci_ver = rp->hci_ver;
600 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
601 hdev->lmp_ver = rp->lmp_ver;
602 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
603 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
604
605 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
606 hdev->manufacturer,
607 hdev->hci_ver, hdev->hci_rev);
608
609 if (test_bit(HCI_INIT, &hdev->flags))
610 hci_setup(hdev);
611 }
612
/* Derive the default link policy from the feature bits the controller
 * advertises and write it with HCI_OP_WRITE_DEF_LINK_POLICY. */
static void hci_setup_link_policy(struct hci_dev *hdev)
{
	u16 link_policy = 0;

	if (hdev->features[0] & LMP_RSWITCH)
		link_policy |= HCI_LP_RSWITCH;
	if (hdev->features[0] & LMP_HOLD)
		link_policy |= HCI_LP_HOLD;
	if (hdev->features[0] & LMP_SNIFF)
		link_policy |= HCI_LP_SNIFF;
	if (hdev->features[1] & LMP_PARK)
		link_policy |= HCI_LP_PARK;

	/* Convert in place to wire (little-endian) order before sending
	 * the variable's storage as the command parameter. */
	link_policy = cpu_to_le16(link_policy);
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					sizeof(link_policy), &link_policy);
}
630
/* Command Complete handler for HCI_OP_READ_LOCAL_COMMANDS.
 *
 * Caches the supported-commands bitmap and, while initializing, writes
 * the default link policy if the controller supports the command
 * (octet 5 bit 0x10 = Write Default Link Policy Settings). The pending
 * request is completed regardless of status.
 */
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
648
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches the LMP feature page and widens the default ACL packet types
 * and (e)SCO link types according to what the controller supports.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types. */
	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
					hdev->features[0], hdev->features[1],
					hdev->features[2], hdev->features[3],
					hdev->features[4], hdev->features[5],
					hdev->features[6], hdev->features[7]);
}
703
704 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
705 struct sk_buff *skb)
706 {
707 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
708
709 BT_DBG("%s status 0x%x", hdev->name, rp->status);
710
711 if (rp->status)
712 return;
713
714 switch (rp->page) {
715 case 0:
716 memcpy(hdev->features, rp->features, 8);
717 break;
718 case 1:
719 memcpy(hdev->host_features, rp->features, 8);
720 break;
721 }
722
723 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
724 }
725
726 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
727 struct sk_buff *skb)
728 {
729 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
730
731 BT_DBG("%s status 0x%x", hdev->name, rp->status);
732
733 if (rp->status)
734 return;
735
736 hdev->flow_ctl_mode = rp->mode;
737
738 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
739 }
740
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Caches the ACL/SCO MTUs and packet counts. Controllers with the
 * FIXUP_BUFFER_SIZE quirk report unusable SCO values, so those are
 * overridden after the reply is parsed. The free-packet counters are
 * initialized to the full capacity.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Quirk: override bogus SCO buffer info from the controller. */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
					hdev->acl_mtu, hdev->acl_pkts,
					hdev->sco_mtu, hdev->sco_pkts);
}
767
768 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
769 {
770 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
771
772 BT_DBG("%s status 0x%x", hdev->name, rp->status);
773
774 if (!rp->status)
775 bacpy(&hdev->bdaddr, &rp->bdaddr);
776
777 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
778 }
779
780 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
781 struct sk_buff *skb)
782 {
783 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
784
785 BT_DBG("%s status 0x%x", hdev->name, rp->status);
786
787 if (rp->status)
788 return;
789
790 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
791 hdev->block_len = __le16_to_cpu(rp->block_len);
792 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
793
794 hdev->block_cnt = hdev->num_blocks;
795
796 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
797 hdev->block_cnt, hdev->block_len);
798
799 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
800 }
801
802 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
803 {
804 __u8 status = *((__u8 *) skb->data);
805
806 BT_DBG("%s status 0x%x", hdev->name, status);
807
808 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
809 }
810
811 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
812 struct sk_buff *skb)
813 {
814 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
815
816 BT_DBG("%s status 0x%x", hdev->name, rp->status);
817
818 if (rp->status)
819 return;
820
821 hdev->amp_status = rp->amp_status;
822 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
823 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
824 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
825 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
826 hdev->amp_type = rp->amp_type;
827 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
828 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
829 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
830 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
831
832 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
833 }
834
835 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
836 struct sk_buff *skb)
837 {
838 __u8 status = *((__u8 *) skb->data);
839
840 BT_DBG("%s status 0x%x", hdev->name, status);
841
842 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
843 }
844
845 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
846 {
847 __u8 status = *((__u8 *) skb->data);
848
849 BT_DBG("%s status 0x%x", hdev->name, status);
850
851 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
852 }
853
854 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
855 struct sk_buff *skb)
856 {
857 __u8 status = *((__u8 *) skb->data);
858
859 BT_DBG("%s status 0x%x", hdev->name, status);
860
861 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
862 }
863
864 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
865 struct sk_buff *skb)
866 {
867 __u8 status = *((__u8 *) skb->data);
868
869 BT_DBG("%s status 0x%x", hdev->name, status);
870
871 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
872 }
873
874 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
875 {
876 __u8 status = *((__u8 *) skb->data);
877
878 BT_DBG("%s status 0x%x", hdev->name, status);
879
880 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
881 }
882
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Notifies mgmt of the outcome (success or failure), and on success
 * records the PIN length from the queued command on the matching ACL
 * connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
910
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY: forward the
 * outcome to mgmt (when in use) under the dev lock. */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
925
926 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
927 struct sk_buff *skb)
928 {
929 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
930
931 BT_DBG("%s status 0x%x", hdev->name, rp->status);
932
933 if (rp->status)
934 return;
935
936 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
937 hdev->le_pkts = rp->le_max_pkt;
938
939 hdev->le_cnt = hdev->le_pkts;
940
941 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
942
943 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
944 }
945
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY: forward the
 * outcome to mgmt (when in use) under the dev lock. */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
960
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY: forward
 * the outcome to mgmt (when in use) under the dev lock. */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
976
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY: forward the
 * outcome to mgmt (when in use) under the dev lock. Note the reply
 * struct layout is shared with the user-confirm replies. */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
991
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY: forward
 * the outcome to mgmt (when in use) under the dev lock. */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
								rp->status);

	hci_dev_unlock(hdev);
}
1007
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA: hand the
 * hash/randomizer (and status) to mgmt under the dev lock. */
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}
1020
1021 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1022 {
1023 __u8 status = *((__u8 *) skb->data);
1024
1025 BT_DBG("%s status 0x%x", hdev->name, status);
1026 }
1027
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Mirrors the enable parameter we sent into the HCI_LE_SCAN dev flag.
 * When scanning starts, the cached advertising entries are cleared
 * (and the delayed clear is cancelled); when it stops, the delayed
 * clear is re-armed so stale entries eventually expire.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* Entries will be refreshed by the new scan; stop the
		 * pending expiry and drop the stale cache. */
		cancel_delayed_work_sync(&hdev->adv_work);

		hci_dev_lock(hdev);
		hci_adv_entries_clear(hdev);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* Expire cached advertising entries after a grace period. */
		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1065
/* Command Complete handler for HCI_OP_LE_LTK_REPLY.
 *
 * NOTE(review): the pending request is only completed on success; a
 * failing reply returns early and leaves any waiter to time out —
 * confirm this is intentional before changing.
 */
static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
}
1077
/* Command Complete handler for HCI_OP_LE_LTK_NEG_REPLY.
 *
 * NOTE(review): like hci_cc_le_ltk_reply, a failing status returns
 * early without completing the pending request — confirm intentional.
 */
static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}
1089
1090 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1091 struct sk_buff *skb)
1092 {
1093 struct hci_cp_read_local_ext_features cp;
1094 __u8 status = *((__u8 *) skb->data);
1095
1096 BT_DBG("%s status 0x%x", hdev->name, status);
1097
1098 if (status)
1099 return;
1100
1101 cp.page = 0x01;
1102 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1103 }
1104
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure, completes the request, resumes deferred connection
 * attempts and (when mgmt is in use) reports the failed discovery
 * start. On success, sets HCI_INQUIRY and moves the discovery state
 * machine to DISCOVERY_INQUIRY.
 */
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_INQUIRY);
	hci_dev_unlock(hdev);
}
1125
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure: a connection in BT_CONNECT is torn down, except that a
 * 0x0c (Command Disallowed) status within the first two attempts parks
 * it in BT_CONNECT2 for a later retry. On success: if no hci_conn
 * exists yet for the target address, one is created and marked as the
 * outgoing master side.
 */
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: keep the connection
			 * around for retry unless we already tried twice. */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = 1;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1165
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling: look up the ACL connection whose handle
 * was in the command, and tear down the SCO link hanging off it,
 * notifying the protocol layer of the failure.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1200
/* Command Status handler for HCI_OP_AUTH_REQUESTED.
 *
 * Only acts on failure: if the connection was still in BT_CONFIG,
 * deliver the error to the protocol layer and drop the reference
 * taken for the configuration phase.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1227
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.
 *
 * Only acts on failure: if the connection was still in BT_CONFIG,
 * deliver the error to the protocol layer and drop the reference
 * taken for the configuration phase.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1254
1255 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1256 struct hci_conn *conn)
1257 {
1258 if (conn->state != BT_CONFIG || !conn->out)
1259 return 0;
1260
1261 if (conn->pending_sec_level == BT_SECURITY_SDP)
1262 return 0;
1263
1264 /* Only request authentication for SSP connections or non-SSP
1265 * devices with sec_level HIGH or if MITM protection is requested */
1266 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1267 conn->pending_sec_level != BT_SECURITY_HIGH &&
1268 !(conn->auth_type & 0x01))
1269 return 0;
1270
1271 return 1;
1272 }
1273
1274 static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e)
1275 {
1276 struct hci_cp_remote_name_req cp;
1277
1278 memset(&cp, 0, sizeof(cp));
1279
1280 bacpy(&cp.bdaddr, &e->data.bdaddr);
1281 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1282 cp.pscan_mode = e->data.pscan_mode;
1283 cp.clock_offset = e->data.clock_offset;
1284
1285 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1286 }
1287
/* Advance name resolution after a name request for @bdaddr finished.
 *
 * Marks the entry for @bdaddr as resolved, then either kicks off the
 * name request for the next NAME_NEEDED entry or, when the resolve
 * list is empty (or discovery is being stopped), terminates discovery.
 * Caller must hold the hdev lock.
 */
static void hci_resolve_next_name(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	/* The request for @bdaddr is done; retire its cache entry */
	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	if (e) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	}

	if (list_empty(&discov->resolve))
		goto discov_complete;

	/* Start resolving the next device that still needs a name */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return;
	}

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1317
1318 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1319 {
1320 struct hci_cp_remote_name_req *cp;
1321 struct hci_conn *conn;
1322
1323 BT_DBG("%s status 0x%x", hdev->name, status);
1324
1325 /* If successful wait for the name req complete event before
1326 * checking for the need to do authentication */
1327 if (!status)
1328 return;
1329
1330 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1331 if (!cp)
1332 return;
1333
1334 hci_dev_lock(hdev);
1335
1336 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1337 hci_resolve_next_name(hdev, &cp->bdaddr);
1338
1339 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1340 if (!conn)
1341 goto unlock;
1342
1343 if (!hci_outgoing_auth_needed(hdev, conn))
1344 goto unlock;
1345
1346 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1347 struct hci_cp_auth_requested cp;
1348 cp.handle = __cpu_to_le16(conn->handle);
1349 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1350 }
1351
1352 unlock:
1353 hci_dev_unlock(hdev);
1354 }
1355
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.
 *
 * Only acts on failure: if the connection was still in BT_CONFIG,
 * deliver the error to the protocol layer and drop the config-phase
 * reference.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1382
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.
 *
 * Only acts on failure: if the connection was still in BT_CONFIG,
 * deliver the error to the protocol layer and drop the config-phase
 * reference.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1409
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures matter (success arrives via the sync conn complete
 * event): find the parent ACL link and tear down its pending SCO/eSCO
 * child connection.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* Handle in the command refers to the parent ACL link */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1444
/* Command Status handler for HCI_OP_SNIFF_MODE.
 *
 * On failure: clear the pending mode-change flag and, if a SCO setup
 * was waiting on the mode change, let it proceed with the error.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		/* Resume a SCO setup that was blocked on the mode change */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1471
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.
 *
 * Mirror image of hci_cs_sniff_mode(): on failure, clear the pending
 * mode-change flag and resume any SCO setup that was waiting on it.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		/* Resume a SCO setup that was blocked on the mode change */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1498
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On error: close and delete the LE connection that was being set up.
 * On success: create the hci_conn now (outgoing, with the peer's
 * address type) if it does not exist in the connection hash yet.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
		conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
			if (conn) {
				/* Remember random vs public peer address */
				conn->dst_type = cp->peer_addr_type;
				conn->out = 1;
			} else {
				BT_ERR("No memory for new connection");
			}
		}
	}

	hci_dev_unlock(hdev);
}
1537
/* Command Status handler for HCI_OP_LE_START_ENC.
 * Result is delivered via the encrypt change event; only log here.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}
1542
/* Handler for the Inquiry Complete event.
 *
 * Completes the inquiry request and re-checks pending connections.
 * When mgmt drives discovery, either starts resolving names for cache
 * entries that still need one (entering DISCOVERY_RESOLVING) or marks
 * discovery stopped.
 */
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status %d", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);

	/* Ignore the event if no inquiry was actually in progress */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* The discovery state machine only runs for mgmt-managed adapters */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_INQUIRY)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution for the first device needing a name */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1582
/* Handler for the (standard, non-RSSI) Inquiry Result event.
 *
 * The event carries a response count byte followed by that many
 * inquiry_info records.  Each record updates the inquiry cache and is
 * forwarded to mgmt as a found device (no RSSI/EIR available in this
 * event format, hence the zero rssi and NULL eir).
 */
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* This event format carries neither RSSI nor SSP data */
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		name_known = hci_inquiry_cache_update(hdev, &data, false);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, 0, !name_known, NULL);
	}

	hci_dev_unlock(hdev);
}
1615
/* Handler for the Connection Complete event.
 *
 * Matches the event to a pending hci_conn (falling back from SCO to
 * eSCO lookup for synchronous links).  On success: records the handle,
 * moves ACL links into BT_CONFIG (notifying mgmt) and others straight
 * to BT_CONNECTED, inherits adapter-wide auth/encrypt link modes, and
 * issues follow-up commands (read remote features; packet-type fixup
 * for pre-2.0 incoming links).  On failure: closes the connection and
 * reports the error.
 */
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* An eSCO request may complete as plain SCO; retarget it */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			mgmt_connected(hdev, &ev->bdaddr, conn->type,
							conn->dst_type);
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		/* Adapter-wide auth/encrypt settings apply to the new link */
		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
							sizeof(cp), &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* Non-ACL links are fully up now; ACL waits for config */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1695
/* Handler for the Connection Request event (incoming connection).
 *
 * Accepts the request when the link policy (plus protocol layers)
 * allows it and the peer is not blacklisted: refreshes the inquiry
 * cache class-of-device, creates/updates the hci_conn, then answers
 * with either Accept Connection Request (ACL, or SCO on non-eSCO
 * adapters) or Accept Synchronous Connection Request (eSCO).
 * Otherwise rejects the request.
 */
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
					batostr(&ev->bdaddr), ev->link_type);

	/* Protocol layers may extend the accept/master mask */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Keep the cached class of device up to date */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			/* Try to take the master role when allowed */
			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
							sizeof(cp), &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* 8 kHz / 16-bit bandwidth, no latency/retransmit
			 * constraints; voice format from adapter settings */
			cp.tx_bandwidth = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = cpu_to_le32(0x00001f40);
			cp.max_latency = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1769
/* Handler for the Disconnection Complete event.
 *
 * On success: close the connection, notify mgmt (ACL/LE only), inform
 * the protocol layer with the disconnect reason, and delete the conn.
 * On failure: only report the failed disconnect attempt to mgmt.
 */
static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* mgmt only tracks ACL and LE links, not SCO/eSCO */
	if (conn->type == ACL_LINK || conn->type == LE_LINK) {
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
		else
			mgmt_disconnected(hdev, &conn->dst, conn->type,
							conn->dst_type);
	}

	if (ev->status == 0) {
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1802
/* Handler for the Authentication Complete event.
 *
 * On success: records HCI_LM_AUTH and promotes the security level
 * (except for a re-auth attempt on a legacy, non-SSP link, which the
 * controller cannot perform).  On failure: reports it to mgmt.
 * Afterwards it drives the next step: start encryption for SSP links
 * still in BT_CONFIG, complete the connection otherwise, and service
 * any pending encryption request.
 */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) links cannot be re-authenticated */
		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
				test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
			/* SSP requires encryption before the link is usable */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	/* Service an encryption request that was queued behind auth */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1867
/* Handler for the Remote Name Request Complete event.
 *
 * Forwards the resolved name to mgmt and advances mgmt-driven name
 * resolution.  If an outgoing connection was waiting on the name
 * lookup before deciding about authentication, request it now.
 */
static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		if (ev->status == 0)
			mgmt_remote_name(hdev, &ev->bdaddr, ev->name);

		/* Move on to the next device needing resolution */
		hci_resolve_next_name(hdev, &ev->bdaddr);
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1902
/* Handler for the Encryption Change event.
 *
 * Updates the link-mode bits (encryption implies authentication) and
 * the effective security level, then either completes a connection in
 * BT_CONFIG or delivers the encryption change to waiting users.
 */
static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

	hci_dev_unlock(hdev);
}
1938
/* Handler for the Change Connection Link Key Complete event.
 *
 * On success marks the link as using a secure (changed) key, clears
 * the pending-auth flag and notifies listeners of the key change.
 */
static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
1960
/* Handler for the Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page, then for connections still in
 * BT_CONFIG: queries the extended features if both sides support SSP,
 * otherwise requests the remote name and — unless outgoing
 * authentication is still needed — completes the connection.
 */
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* SSP-capable peers get a follow-up extended features query */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2006
/* Handler for the Read Remote Version Information Complete event.
 * Currently unused beyond debug logging.
 */
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2011
/* Handler for the QoS Setup Complete event.
 * Currently unused beyond debug logging.
 */
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2016
2017 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2018 {
2019 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2020 __u16 opcode;
2021
2022 skb_pull(skb, sizeof(*ev));
2023
2024 opcode = __le16_to_cpu(ev->opcode);
2025
2026 switch (opcode) {
2027 case HCI_OP_INQUIRY_CANCEL:
2028 hci_cc_inquiry_cancel(hdev, skb);
2029 break;
2030
2031 case HCI_OP_EXIT_PERIODIC_INQ:
2032 hci_cc_exit_periodic_inq(hdev, skb);
2033 break;
2034
2035 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2036 hci_cc_remote_name_req_cancel(hdev, skb);
2037 break;
2038
2039 case HCI_OP_ROLE_DISCOVERY:
2040 hci_cc_role_discovery(hdev, skb);
2041 break;
2042
2043 case HCI_OP_READ_LINK_POLICY:
2044 hci_cc_read_link_policy(hdev, skb);
2045 break;
2046
2047 case HCI_OP_WRITE_LINK_POLICY:
2048 hci_cc_write_link_policy(hdev, skb);
2049 break;
2050
2051 case HCI_OP_READ_DEF_LINK_POLICY:
2052 hci_cc_read_def_link_policy(hdev, skb);
2053 break;
2054
2055 case HCI_OP_WRITE_DEF_LINK_POLICY:
2056 hci_cc_write_def_link_policy(hdev, skb);
2057 break;
2058
2059 case HCI_OP_RESET:
2060 hci_cc_reset(hdev, skb);
2061 break;
2062
2063 case HCI_OP_WRITE_LOCAL_NAME:
2064 hci_cc_write_local_name(hdev, skb);
2065 break;
2066
2067 case HCI_OP_READ_LOCAL_NAME:
2068 hci_cc_read_local_name(hdev, skb);
2069 break;
2070
2071 case HCI_OP_WRITE_AUTH_ENABLE:
2072 hci_cc_write_auth_enable(hdev, skb);
2073 break;
2074
2075 case HCI_OP_WRITE_ENCRYPT_MODE:
2076 hci_cc_write_encrypt_mode(hdev, skb);
2077 break;
2078
2079 case HCI_OP_WRITE_SCAN_ENABLE:
2080 hci_cc_write_scan_enable(hdev, skb);
2081 break;
2082
2083 case HCI_OP_READ_CLASS_OF_DEV:
2084 hci_cc_read_class_of_dev(hdev, skb);
2085 break;
2086
2087 case HCI_OP_WRITE_CLASS_OF_DEV:
2088 hci_cc_write_class_of_dev(hdev, skb);
2089 break;
2090
2091 case HCI_OP_READ_VOICE_SETTING:
2092 hci_cc_read_voice_setting(hdev, skb);
2093 break;
2094
2095 case HCI_OP_WRITE_VOICE_SETTING:
2096 hci_cc_write_voice_setting(hdev, skb);
2097 break;
2098
2099 case HCI_OP_HOST_BUFFER_SIZE:
2100 hci_cc_host_buffer_size(hdev, skb);
2101 break;
2102
2103 case HCI_OP_READ_SSP_MODE:
2104 hci_cc_read_ssp_mode(hdev, skb);
2105 break;
2106
2107 case HCI_OP_WRITE_SSP_MODE:
2108 hci_cc_write_ssp_mode(hdev, skb);
2109 break;
2110
2111 case HCI_OP_READ_LOCAL_VERSION:
2112 hci_cc_read_local_version(hdev, skb);
2113 break;
2114
2115 case HCI_OP_READ_LOCAL_COMMANDS:
2116 hci_cc_read_local_commands(hdev, skb);
2117 break;
2118
2119 case HCI_OP_READ_LOCAL_FEATURES:
2120 hci_cc_read_local_features(hdev, skb);
2121 break;
2122
2123 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2124 hci_cc_read_local_ext_features(hdev, skb);
2125 break;
2126
2127 case HCI_OP_READ_BUFFER_SIZE:
2128 hci_cc_read_buffer_size(hdev, skb);
2129 break;
2130
2131 case HCI_OP_READ_BD_ADDR:
2132 hci_cc_read_bd_addr(hdev, skb);
2133 break;
2134
2135 case HCI_OP_READ_DATA_BLOCK_SIZE:
2136 hci_cc_read_data_block_size(hdev, skb);
2137 break;
2138
2139 case HCI_OP_WRITE_CA_TIMEOUT:
2140 hci_cc_write_ca_timeout(hdev, skb);
2141 break;
2142
2143 case HCI_OP_READ_FLOW_CONTROL_MODE:
2144 hci_cc_read_flow_control_mode(hdev, skb);
2145 break;
2146
2147 case HCI_OP_READ_LOCAL_AMP_INFO:
2148 hci_cc_read_local_amp_info(hdev, skb);
2149 break;
2150
2151 case HCI_OP_DELETE_STORED_LINK_KEY:
2152 hci_cc_delete_stored_link_key(hdev, skb);
2153 break;
2154
2155 case HCI_OP_SET_EVENT_MASK:
2156 hci_cc_set_event_mask(hdev, skb);
2157 break;
2158
2159 case HCI_OP_WRITE_INQUIRY_MODE:
2160 hci_cc_write_inquiry_mode(hdev, skb);
2161 break;
2162
2163 case HCI_OP_READ_INQ_RSP_TX_POWER:
2164 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2165 break;
2166
2167 case HCI_OP_SET_EVENT_FLT:
2168 hci_cc_set_event_flt(hdev, skb);
2169 break;
2170
2171 case HCI_OP_PIN_CODE_REPLY:
2172 hci_cc_pin_code_reply(hdev, skb);
2173 break;
2174
2175 case HCI_OP_PIN_CODE_NEG_REPLY:
2176 hci_cc_pin_code_neg_reply(hdev, skb);
2177 break;
2178
2179 case HCI_OP_READ_LOCAL_OOB_DATA:
2180 hci_cc_read_local_oob_data_reply(hdev, skb);
2181 break;
2182
2183 case HCI_OP_LE_READ_BUFFER_SIZE:
2184 hci_cc_le_read_buffer_size(hdev, skb);
2185 break;
2186
2187 case HCI_OP_USER_CONFIRM_REPLY:
2188 hci_cc_user_confirm_reply(hdev, skb);
2189 break;
2190
2191 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2192 hci_cc_user_confirm_neg_reply(hdev, skb);
2193 break;
2194
2195 case HCI_OP_USER_PASSKEY_REPLY:
2196 hci_cc_user_passkey_reply(hdev, skb);
2197 break;
2198
2199 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2200 hci_cc_user_passkey_neg_reply(hdev, skb);
2201
2202 case HCI_OP_LE_SET_SCAN_PARAM:
2203 hci_cc_le_set_scan_param(hdev, skb);
2204 break;
2205
2206 case HCI_OP_LE_SET_SCAN_ENABLE:
2207 hci_cc_le_set_scan_enable(hdev, skb);
2208 break;
2209
2210 case HCI_OP_LE_LTK_REPLY:
2211 hci_cc_le_ltk_reply(hdev, skb);
2212 break;
2213
2214 case HCI_OP_LE_LTK_NEG_REPLY:
2215 hci_cc_le_ltk_neg_reply(hdev, skb);
2216 break;
2217
2218 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2219 hci_cc_write_le_host_supported(hdev, skb);
2220 break;
2221
2222 default:
2223 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2224 break;
2225 }
2226
2227 if (ev->opcode != HCI_OP_NOP)
2228 del_timer(&hdev->cmd_timer);
2229
2230 if (ev->ncmd) {
2231 atomic_set(&hdev->cmd_cnt, 1);
2232 if (!skb_queue_empty(&hdev->cmd_q))
2233 queue_work(hdev->workqueue, &hdev->cmd_work);
2234 }
2235 }
2236
/* Handler for the Command Status event.
 *
 * Dispatches to the per-opcode hci_cs_* handler, cancels the command
 * timeout (except for vendor NOPs) and restarts the command queue
 * worker when the controller has room — unless a reset is in flight.
 */
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		/* No bdaddr available at this point; report with NULL */
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, NULL, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	/* Real command acknowledged: cancel the command timeout */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Don't restart the queue while the controller is resetting */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2318
/* Handler for the Role Change event.
 *
 * Updates the master/slave bit in the link mode (role 0x00 = master),
 * clears the pending role-switch flag and notifies listeners.
 */
static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status) {
			if (ev->role)
				conn->link_mode &= ~HCI_LM_MASTER;
			else
				conn->link_mode |= HCI_LM_MASTER;
		}

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}
2344
/* Handler for the Number Of Completed Packets event (packet-based
 * flow control).
 *
 * For each (handle, count) pair: credits the count back to the
 * connection's in-flight counter and to the per-link-type quota
 * (clamped to the controller's advertised limits), then restarts the
 * TX worker.  LE credits fall back to the ACL pool on controllers
 * without a separate LE buffer.
 */
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the claimed handle count fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				/* No dedicated LE buffers: shared with ACL */
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2410
2411 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2412 struct sk_buff *skb)
2413 {
2414 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2415 int i;
2416
2417 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2418 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2419 return;
2420 }
2421
2422 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2423 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2424 BT_DBG("%s bad parameters", hdev->name);
2425 return;
2426 }
2427
2428 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2429 ev->num_hndl);
2430
2431 for (i = 0; i < ev->num_hndl; i++) {
2432 struct hci_comp_blocks_info *info = &ev->handles[i];
2433 struct hci_conn *conn;
2434 __u16 handle, block_count;
2435
2436 handle = __le16_to_cpu(info->handle);
2437 block_count = __le16_to_cpu(info->blocks);
2438
2439 conn = hci_conn_hash_lookup_handle(hdev, handle);
2440 if (!conn)
2441 continue;
2442
2443 conn->sent -= block_count;
2444
2445 switch (conn->type) {
2446 case ACL_LINK:
2447 hdev->block_cnt += block_count;
2448 if (hdev->block_cnt > hdev->num_blocks)
2449 hdev->block_cnt = hdev->num_blocks;
2450 break;
2451
2452 default:
2453 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2454 break;
2455 }
2456 }
2457
2458 queue_work(hdev->workqueue, &hdev->tx_work);
2459 }
2460
2461 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2462 {
2463 struct hci_ev_mode_change *ev = (void *) skb->data;
2464 struct hci_conn *conn;
2465
2466 BT_DBG("%s status %d", hdev->name, ev->status);
2467
2468 hci_dev_lock(hdev);
2469
2470 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2471 if (conn) {
2472 conn->mode = ev->mode;
2473 conn->interval = __le16_to_cpu(ev->interval);
2474
2475 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
2476 if (conn->mode == HCI_CM_ACTIVE)
2477 conn->power_save = 1;
2478 else
2479 conn->power_save = 0;
2480 }
2481
2482 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
2483 hci_sco_setup(conn, ev->status);
2484 }
2485
2486 hci_dev_unlock(hdev);
2487 }
2488
2489 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2490 {
2491 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2492 struct hci_conn *conn;
2493
2494 BT_DBG("%s", hdev->name);
2495
2496 hci_dev_lock(hdev);
2497
2498 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2499 if (!conn)
2500 goto unlock;
2501
2502 if (conn->state == BT_CONNECTED) {
2503 hci_conn_hold(conn);
2504 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2505 hci_conn_put(conn);
2506 }
2507
2508 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2509 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2510 sizeof(ev->bdaddr), &ev->bdaddr);
2511 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2512 u8 secure;
2513
2514 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2515 secure = 1;
2516 else
2517 secure = 0;
2518
2519 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2520 }
2521
2522 unlock:
2523 hci_dev_unlock(hdev);
2524 }
2525
2526 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2527 {
2528 struct hci_ev_link_key_req *ev = (void *) skb->data;
2529 struct hci_cp_link_key_reply cp;
2530 struct hci_conn *conn;
2531 struct link_key *key;
2532
2533 BT_DBG("%s", hdev->name);
2534
2535 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2536 return;
2537
2538 hci_dev_lock(hdev);
2539
2540 key = hci_find_link_key(hdev, &ev->bdaddr);
2541 if (!key) {
2542 BT_DBG("%s link key not found for %s", hdev->name,
2543 batostr(&ev->bdaddr));
2544 goto not_found;
2545 }
2546
2547 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2548 batostr(&ev->bdaddr));
2549
2550 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2551 key->type == HCI_LK_DEBUG_COMBINATION) {
2552 BT_DBG("%s ignoring debug key", hdev->name);
2553 goto not_found;
2554 }
2555
2556 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2557 if (conn) {
2558 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2559 conn->auth_type != 0xff &&
2560 (conn->auth_type & 0x01)) {
2561 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2562 goto not_found;
2563 }
2564
2565 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2566 conn->pending_sec_level == BT_SECURITY_HIGH) {
2567 BT_DBG("%s ignoring key unauthenticated for high \
2568 security", hdev->name);
2569 goto not_found;
2570 }
2571
2572 conn->key_type = key->type;
2573 conn->pin_length = key->pin_len;
2574 }
2575
2576 bacpy(&cp.bdaddr, &ev->bdaddr);
2577 memcpy(cp.link_key, key->val, 16);
2578
2579 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2580
2581 hci_dev_unlock(hdev);
2582
2583 return;
2584
2585 not_found:
2586 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2587 hci_dev_unlock(hdev);
2588 }
2589
2590 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2591 {
2592 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2593 struct hci_conn *conn;
2594 u8 pin_len = 0;
2595
2596 BT_DBG("%s", hdev->name);
2597
2598 hci_dev_lock(hdev);
2599
2600 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2601 if (conn) {
2602 hci_conn_hold(conn);
2603 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2604 pin_len = conn->pin_length;
2605
2606 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2607 conn->key_type = ev->key_type;
2608
2609 hci_conn_put(conn);
2610 }
2611
2612 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2613 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2614 ev->key_type, pin_len);
2615
2616 hci_dev_unlock(hdev);
2617 }
2618
2619 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2620 {
2621 struct hci_ev_clock_offset *ev = (void *) skb->data;
2622 struct hci_conn *conn;
2623
2624 BT_DBG("%s status %d", hdev->name, ev->status);
2625
2626 hci_dev_lock(hdev);
2627
2628 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2629 if (conn && !ev->status) {
2630 struct inquiry_entry *ie;
2631
2632 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2633 if (ie) {
2634 ie->data.clock_offset = ev->clock_offset;
2635 ie->timestamp = jiffies;
2636 }
2637 }
2638
2639 hci_dev_unlock(hdev);
2640 }
2641
2642 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2643 {
2644 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2645 struct hci_conn *conn;
2646
2647 BT_DBG("%s status %d", hdev->name, ev->status);
2648
2649 hci_dev_lock(hdev);
2650
2651 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2652 if (conn && !ev->status)
2653 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2654
2655 hci_dev_unlock(hdev);
2656 }
2657
2658 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2659 {
2660 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2661 struct inquiry_entry *ie;
2662
2663 BT_DBG("%s", hdev->name);
2664
2665 hci_dev_lock(hdev);
2666
2667 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2668 if (ie) {
2669 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2670 ie->timestamp = jiffies;
2671 }
2672
2673 hci_dev_unlock(hdev);
2674 }
2675
/* Inquiry Result with RSSI event: one or more discovered devices with
 * signal strength.  Two wire formats exist (with or without a
 * pscan_mode octet); they are distinguished by dividing the remaining
 * payload length by the number of responses. */
static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	/* Per-entry size mismatch means the variant that also carries a
	 * page scan mode octet. */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			/* name_known false: RSSI results carry no EIR,
			 * so the remote name is still unresolved. */
			name_known = hci_inquiry_cache_update(hdev, &data,
								false);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						!name_known, NULL);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
								false);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						!name_known, NULL);
		}
	}

	hci_dev_unlock(hdev);
}
2731
/* Read Remote Extended Features Complete event.  Page 0x01 carries
 * the host features, including the SSP (Simple Pairing) support bit.
 * While the connection is still in BT_CONFIG this also drives the
 * next setup step: remote name request, then either authentication
 * or completing the connection. */
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		/* Bit 0 of features page 1 is the SSP host support flag;
		 * mirror it into the inquiry cache and the connection. */
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & 0x01);

		conn->ssp_mode = (ev->features[0] & 0x01);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	/* No outgoing authentication pending: setup is complete. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2775
/* Synchronous Connection Complete event (SCO/eSCO).  An eSCO attempt
 * the remote rejected may have been downgraded, so fall back to the
 * SCO entry when no eSCO connection matches.  Certain failure codes
 * trigger one retry with a reduced packet-type mask. */
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The controller fell back from eSCO to SCO; reuse the
		 * pending eSCO connection object as a SCO one. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry once as initiator with only the basic eSCO/EDR
		 * packet types the controller supports. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2830
/* Synchronous Connection Changed event: intentionally a stub — the
 * stack keeps no state that needs updating here; only trace it. */
static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2835
/* Sniff Subrating event: intentionally a stub — the new subrating
 * parameters are not tracked by the stack; only the status is logged. */
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}
2842
2843 static inline bool eir_has_complete_name(u8 *data, size_t data_len)
2844 {
2845 u8 field_len;
2846 size_t parsed;
2847
2848 for (parsed = 0; parsed < data_len - 1; parsed += field_len) {
2849 field_len = data[0];
2850
2851 if (field_len == 0)
2852 break;
2853
2854 parsed += field_len + 1;
2855
2856 if (parsed > data_len)
2857 break;
2858
2859 if (data[1] == EIR_NAME_COMPLETE)
2860 return true;
2861
2862 data += field_len + 1;
2863 }
2864
2865 return false;
2866 }
2867
/* Extended Inquiry Result event: discovered devices including an EIR
 * data blob.  When mgmt is in control, the EIR is checked for a
 * complete name so a separate name request can be skipped. */
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		/* Extended results imply SSP support. */
		data.ssp_mode = 0x01;

		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_complete_name(info->data,
							sizeof(info->data));
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					info->dev_class, info->rssi,
					!name_known, info->data);
	}

	hci_dev_unlock(hdev);
}
2907
2908 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2909 {
2910 /* If remote requests dedicated bonding follow that lead */
2911 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2912 /* If both remote and local IO capabilities allow MITM
2913 * protection then require it, otherwise don't */
2914 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2915 return 0x02;
2916 else
2917 return 0x03;
2918 }
2919
2920 /* If remote requests no-bonding follow that lead */
2921 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2922 return conn->remote_auth | (conn->auth_type & 0x01);
2923
2924 return conn->auth_type;
2925 }
2926
/* IO Capability Request event: the start of SSP pairing.  Reply with
 * our capabilities when pairing is acceptable (adapter pairable, or
 * remote only wants no-bonding); otherwise reject with "pairing not
 * allowed".  Only handled when mgmt drives the adapter. */
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of pairing.
	 * NOTE(review): the matching put appears to happen in
	 * hci_simple_pair_complete_evt — confirm the pairing flow
	 * always delivers that event. */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.capability = conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only for outgoing connections or
		 * when the remote says it has OOB data, and we actually
		 * have stored OOB data for this peer. */
		if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
							sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
							sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2975
2976 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2977 {
2978 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2979 struct hci_conn *conn;
2980
2981 BT_DBG("%s", hdev->name);
2982
2983 hci_dev_lock(hdev);
2984
2985 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2986 if (!conn)
2987 goto unlock;
2988
2989 conn->remote_cap = ev->capability;
2990 conn->remote_oob = ev->oob_data;
2991 conn->remote_auth = ev->authentication;
2992
2993 unlock:
2994 hci_dev_unlock(hdev);
2995 }
2996
/* User Confirmation Request event (SSP numeric comparison).  Decides
 * between rejecting, auto-accepting (immediately or after a delay),
 * or forwarding the confirmation to user space via mgmt. */
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM flag. */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
				(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		/* A configured delay defers the accept via the
		 * connection's auto-accept timer. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
						sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Ask user space to confirm (or authorize, per confirm_hint). */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
								confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3064
/* User Passkey Request event: forward to user space via mgmt when the
 * management interface drives pairing; otherwise ignore. */
static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_request(hdev, &ev->bdaddr);

	hci_dev_unlock(hdev);
}
3079
/* Simple Pairing Complete event: report pairing failures to mgmt and
 * drop the connection reference held for the pairing procedure. */
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
		mgmt_auth_failed(hdev, &conn->dst, ev->status);

	/* Balances the hci_conn_hold taken in hci_io_capa_request_evt. */
	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3106
3107 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3108 {
3109 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3110 struct inquiry_entry *ie;
3111
3112 BT_DBG("%s", hdev->name);
3113
3114 hci_dev_lock(hdev);
3115
3116 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3117 if (ie)
3118 ie->data.ssp_mode = (ev->features[0] & 0x01);
3119
3120 hci_dev_unlock(hdev);
3121 }
3122
3123 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3124 struct sk_buff *skb)
3125 {
3126 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3127 struct oob_data *data;
3128
3129 BT_DBG("%s", hdev->name);
3130
3131 hci_dev_lock(hdev);
3132
3133 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3134 goto unlock;
3135
3136 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3137 if (data) {
3138 struct hci_cp_remote_oob_data_reply cp;
3139
3140 bacpy(&cp.bdaddr, &ev->bdaddr);
3141 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3142 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3143
3144 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3145 &cp);
3146 } else {
3147 struct hci_cp_remote_oob_data_neg_reply cp;
3148
3149 bacpy(&cp.bdaddr, &ev->bdaddr);
3150 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3151 &cp);
3152 }
3153
3154 unlock:
3155 hci_dev_unlock(hdev);
3156 }
3157
/* LE Connection Complete event.  For incoming connections no hci_conn
 * exists yet, so one is created on the fly.  Reports the result to
 * mgmt and notifies the protocol layers. */
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		/* Remember whether the peer address is public or random. */
		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3202
/* LE Advertising Report event: a batch of advertising reports.  Each
 * report is added to the advertising cache. */
static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		hci_add_adv_entry(hdev, ev);

		/* Advance past this report's variable-length data; the
		 * extra +1 presumably skips the trailing RSSI octet —
		 * TODO confirm against the event layout in the spec. */
		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}
3221
3222 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3223 struct sk_buff *skb)
3224 {
3225 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3226 struct hci_cp_le_ltk_reply cp;
3227 struct hci_cp_le_ltk_neg_reply neg;
3228 struct hci_conn *conn;
3229 struct link_key *ltk;
3230
3231 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3232
3233 hci_dev_lock(hdev);
3234
3235 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3236 if (conn == NULL)
3237 goto not_found;
3238
3239 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3240 if (ltk == NULL)
3241 goto not_found;
3242
3243 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3244 cp.handle = cpu_to_le16(conn->handle);
3245 conn->pin_length = ltk->pin_len;
3246
3247 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3248
3249 hci_dev_unlock(hdev);
3250
3251 return;
3252
3253 not_found:
3254 neg.handle = ev->handle;
3255 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3256 hci_dev_unlock(hdev);
3257 }
3258
3259 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3260 {
3261 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3262
3263 skb_pull(skb, sizeof(*le_ev));
3264
3265 switch (le_ev->subevent) {
3266 case HCI_EV_LE_CONN_COMPLETE:
3267 hci_le_conn_complete_evt(hdev, skb);
3268 break;
3269
3270 case HCI_EV_LE_ADVERTISING_REPORT:
3271 hci_le_adv_report_evt(hdev, skb);
3272 break;
3273
3274 case HCI_EV_LE_LTK_REQ:
3275 hci_le_ltk_request_evt(hdev, skb);
3276 break;
3277
3278 default:
3279 break;
3280 }
3281 }
3282
/* Top-level HCI event dispatcher: strips the event header, routes the
 * packet to the matching handler, then frees the skb and counts the
 * received event in the device stats.  Unknown events are only
 * traced. */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	/* The dispatcher owns the skb; handlers must not free it. */
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
3451
3452 /* Generate internal stack event */
3453 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3454 {
3455 struct hci_event_hdr *hdr;
3456 struct hci_ev_stack_internal *ev;
3457 struct sk_buff *skb;
3458
3459 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3460 if (!skb)
3461 return;
3462
3463 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3464 hdr->evt = HCI_EV_STACK_INTERNAL;
3465 hdr->plen = sizeof(*ev) + dlen;
3466
3467 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
3468 ev->type = type;
3469 memcpy(ev->data, data, dlen);
3470
3471 bt_cb(skb)->incoming = 1;
3472 __net_timestamp(skb);
3473
3474 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3475 skb->dev = (void *) hdev;
3476 hci_send_to_sock(hdev, skb, NULL);
3477 kfree_skb(skb);
3478 }
3479
/* Opt-in module parameter for Low Energy support; 0644 also exposes
 * it read/write under /sys/module. */
module_param(enable_le, bool, 0644);
MODULE_PARM_DESC(enable_le, "Enable LE support");