Bluetooth: Return updated name state with hci_inquiry_cache_update
net/bluetooth/hci_event.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)
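
This revision has hci_inquiry_cache_update() report back whether the remote name is already known; the inquiry result handlers in this file (see hci_inquiry_result_evt() below) consume that state roughly as in the following sketch, which simply excerpts the in-file usage and is not a standalone program:

	bool name_known;

	/* Update the inquiry cache; the return value now indicates whether
	 * the remote name is already known for this device. */
	name_known = hci_inquiry_cache_update(hdev, &data, false);

	/* Report the device and request name resolution only when the
	 * name is still unknown. */
	mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					info->dev_class, 0, !name_known, NULL);
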
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 static bool enable_le;
49
50 /* Handle HCI Event packets */
51
52 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53 {
54 __u8 status = *((__u8 *) skb->data);
55
56 BT_DBG("%s status 0x%x", hdev->name, status);
57
58 if (status) {
59 hci_dev_lock(hdev);
60 mgmt_stop_discovery_failed(hdev, status);
61 hci_dev_unlock(hdev);
62 return;
63 }
64
65 clear_bit(HCI_INQUIRY, &hdev->flags);
66
67 hci_dev_lock(hdev);
68 mgmt_discovering(hdev, 0);
69 hci_dev_unlock(hdev);
70
71 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
72
73 hci_conn_check_pending(hdev);
74 }
75
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%x", hdev->name, status);
81
82 if (status)
83 return;
84
85 hci_conn_check_pending(hdev);
86 }
87
88 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
89 {
90 BT_DBG("%s", hdev->name);
91 }
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157 }
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
172 {
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%x", hdev->name, status);
177
178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 if (!sent)
180 return;
181
182 if (!status)
183 hdev->link_policy = get_unaligned_le16(sent);
184
185 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 hci_req_complete(hdev, HCI_OP_RESET, status);
197
198 hdev->dev_flags = 0;
199 }
200
201 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
202 {
203 __u8 status = *((__u8 *) skb->data);
204 void *sent;
205
206 BT_DBG("%s status 0x%x", hdev->name, status);
207
208 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
209 if (!sent)
210 return;
211
212 hci_dev_lock(hdev);
213
214 if (test_bit(HCI_MGMT, &hdev->flags))
215 mgmt_set_local_name_complete(hdev, sent, status);
216
217 if (status == 0)
218 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
219
220 hci_dev_unlock(hdev);
221 }
222
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 struct hci_rp_read_local_name *rp = (void *) skb->data;
226
227 BT_DBG("%s status 0x%x", hdev->name, rp->status);
228
229 if (rp->status)
230 return;
231
232 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
233 }
234
235 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
236 {
237 __u8 status = *((__u8 *) skb->data);
238 void *sent;
239
240 BT_DBG("%s status 0x%x", hdev->name, status);
241
242 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
243 if (!sent)
244 return;
245
246 if (!status) {
247 __u8 param = *((__u8 *) sent);
248
249 if (param == AUTH_ENABLED)
250 set_bit(HCI_AUTH, &hdev->flags);
251 else
252 clear_bit(HCI_AUTH, &hdev->flags);
253 }
254
255 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
256 }
257
258 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
259 {
260 __u8 status = *((__u8 *) skb->data);
261 void *sent;
262
263 BT_DBG("%s status 0x%x", hdev->name, status);
264
265 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
266 if (!sent)
267 return;
268
269 if (!status) {
270 __u8 param = *((__u8 *) sent);
271
272 if (param)
273 set_bit(HCI_ENCRYPT, &hdev->flags);
274 else
275 clear_bit(HCI_ENCRYPT, &hdev->flags);
276 }
277
278 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
279 }
280
281 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
282 {
283 __u8 param, status = *((__u8 *) skb->data);
284 int old_pscan, old_iscan;
285 void *sent;
286
287 BT_DBG("%s status 0x%x", hdev->name, status);
288
289 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
290 if (!sent)
291 return;
292
293 param = *((__u8 *) sent);
294
295 hci_dev_lock(hdev);
296
297 if (status != 0) {
298 mgmt_write_scan_failed(hdev, param, status);
299 hdev->discov_timeout = 0;
300 goto done;
301 }
302
303 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
304 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
305
306 if (param & SCAN_INQUIRY) {
307 set_bit(HCI_ISCAN, &hdev->flags);
308 if (!old_iscan)
309 mgmt_discoverable(hdev, 1);
310 if (hdev->discov_timeout > 0) {
311 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
312 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
313 to);
314 }
315 } else if (old_iscan)
316 mgmt_discoverable(hdev, 0);
317
318 if (param & SCAN_PAGE) {
319 set_bit(HCI_PSCAN, &hdev->flags);
320 if (!old_pscan)
321 mgmt_connectable(hdev, 1);
322 } else if (old_pscan)
323 mgmt_connectable(hdev, 0);
324
325 done:
326 hci_dev_unlock(hdev);
327 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
328 }
329
330 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
331 {
332 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
333
334 BT_DBG("%s status 0x%x", hdev->name, rp->status);
335
336 if (rp->status)
337 return;
338
339 memcpy(hdev->dev_class, rp->dev_class, 3);
340
341 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
342 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
343 }
344
345 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
346 {
347 __u8 status = *((__u8 *) skb->data);
348 void *sent;
349
350 BT_DBG("%s status 0x%x", hdev->name, status);
351
352 if (status)
353 return;
354
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
356 if (!sent)
357 return;
358
359 memcpy(hdev->dev_class, sent, 3);
360 }
361
362 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
363 {
364 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
365 __u16 setting;
366
367 BT_DBG("%s status 0x%x", hdev->name, rp->status);
368
369 if (rp->status)
370 return;
371
372 setting = __le16_to_cpu(rp->voice_setting);
373
374 if (hdev->voice_setting == setting)
375 return;
376
377 hdev->voice_setting = setting;
378
379 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
380
381 if (hdev->notify)
382 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
383 }
384
385 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
386 {
387 __u8 status = *((__u8 *) skb->data);
388 __u16 setting;
389 void *sent;
390
391 BT_DBG("%s status 0x%x", hdev->name, status);
392
393 if (status)
394 return;
395
396 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
397 if (!sent)
398 return;
399
400 setting = get_unaligned_le16(sent);
401
402 if (hdev->voice_setting == setting)
403 return;
404
405 hdev->voice_setting = setting;
406
407 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
408
409 if (hdev->notify)
410 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
411 }
412
413 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
414 {
415 __u8 status = *((__u8 *) skb->data);
416
417 BT_DBG("%s status 0x%x", hdev->name, status);
418
419 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
420 }
421
422 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
423 {
424 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->ssp_mode = rp->mode;
432 }
433
434 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
435 {
436 __u8 status = *((__u8 *) skb->data);
437 void *sent;
438
439 BT_DBG("%s status 0x%x", hdev->name, status);
440
441 if (status)
442 return;
443
444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
445 if (!sent)
446 return;
447
448 hdev->ssp_mode = *((__u8 *) sent);
449 }
450
451 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
452 {
453 if (hdev->features[6] & LMP_EXT_INQ)
454 return 2;
455
456 if (hdev->features[3] & LMP_RSSI_INQ)
457 return 1;
458
459 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
460 hdev->lmp_subver == 0x0757)
461 return 1;
462
463 if (hdev->manufacturer == 15) {
464 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
465 return 1;
466 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
467 return 1;
468 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
469 return 1;
470 }
471
472 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
473 hdev->lmp_subver == 0x1805)
474 return 1;
475
476 return 0;
477 }
478
479 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
480 {
481 u8 mode;
482
483 mode = hci_get_inquiry_mode(hdev);
484
485 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
486 }
487
488 static void hci_setup_event_mask(struct hci_dev *hdev)
489 {
490 /* The second byte is 0xff instead of 0x9f (two reserved bits
491 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
492 * command otherwise */
493 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
494
495 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
496 * any event mask for pre 1.2 devices */
497 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
498 return;
499
500 events[4] |= 0x01; /* Flow Specification Complete */
501 events[4] |= 0x02; /* Inquiry Result with RSSI */
502 events[4] |= 0x04; /* Read Remote Extended Features Complete */
503 events[5] |= 0x08; /* Synchronous Connection Complete */
504 events[5] |= 0x10; /* Synchronous Connection Changed */
505
506 if (hdev->features[3] & LMP_RSSI_INQ)
507 events[4] |= 0x04; /* Inquiry Result with RSSI */
508
509 if (hdev->features[5] & LMP_SNIFF_SUBR)
510 events[5] |= 0x20; /* Sniff Subrating */
511
512 if (hdev->features[5] & LMP_PAUSE_ENC)
513 events[5] |= 0x80; /* Encryption Key Refresh Complete */
514
515 if (hdev->features[6] & LMP_EXT_INQ)
516 events[5] |= 0x40; /* Extended Inquiry Result */
517
518 if (hdev->features[6] & LMP_NO_FLUSH)
519 events[7] |= 0x01; /* Enhanced Flush Complete */
520
521 if (hdev->features[7] & LMP_LSTO)
522 events[6] |= 0x80; /* Link Supervision Timeout Changed */
523
524 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
525 events[6] |= 0x01; /* IO Capability Request */
526 events[6] |= 0x02; /* IO Capability Response */
527 events[6] |= 0x04; /* User Confirmation Request */
528 events[6] |= 0x08; /* User Passkey Request */
529 events[6] |= 0x10; /* Remote OOB Data Request */
530 events[6] |= 0x20; /* Simple Pairing Complete */
531 events[7] |= 0x04; /* User Passkey Notification */
532 events[7] |= 0x08; /* Keypress Notification */
533 events[7] |= 0x10; /* Remote Host Supported
534 * Features Notification */
535 }
536
537 if (hdev->features[4] & LMP_LE)
538 events[7] |= 0x20; /* LE Meta-Event */
539
540 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
541 }
542
543 static void hci_set_le_support(struct hci_dev *hdev)
544 {
545 struct hci_cp_write_le_host_supported cp;
546
547 memset(&cp, 0, sizeof(cp));
548
549 if (enable_le) {
550 cp.le = 1;
551 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
552 }
553
554 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
555 }
556
557 static void hci_setup(struct hci_dev *hdev)
558 {
559 if (hdev->dev_type != HCI_BREDR)
560 return;
561
562 hci_setup_event_mask(hdev);
563
564 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
565 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
566
567 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
568 u8 mode = 0x01;
569 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
570 }
571
572 if (hdev->features[3] & LMP_RSSI_INQ)
573 hci_setup_inquiry_mode(hdev);
574
575 if (hdev->features[7] & LMP_INQ_TX_PWR)
576 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
577
578 if (hdev->features[7] & LMP_EXTFEATURES) {
579 struct hci_cp_read_local_ext_features cp;
580
581 cp.page = 0x01;
582 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
583 sizeof(cp), &cp);
584 }
585
586 if (hdev->features[4] & LMP_LE)
587 hci_set_le_support(hdev);
588 }
589
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 {
592 struct hci_rp_read_local_version *rp = (void *) skb->data;
593
594 BT_DBG("%s status 0x%x", hdev->name, rp->status);
595
596 if (rp->status)
597 return;
598
599 hdev->hci_ver = rp->hci_ver;
600 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
601 hdev->lmp_ver = rp->lmp_ver;
602 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
603 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
604
605 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
606 hdev->manufacturer,
607 hdev->hci_ver, hdev->hci_rev);
608
609 if (test_bit(HCI_INIT, &hdev->flags))
610 hci_setup(hdev);
611 }
612
613 static void hci_setup_link_policy(struct hci_dev *hdev)
614 {
615 u16 link_policy = 0;
616
617 if (hdev->features[0] & LMP_RSWITCH)
618 link_policy |= HCI_LP_RSWITCH;
619 if (hdev->features[0] & LMP_HOLD)
620 link_policy |= HCI_LP_HOLD;
621 if (hdev->features[0] & LMP_SNIFF)
622 link_policy |= HCI_LP_SNIFF;
623 if (hdev->features[1] & LMP_PARK)
624 link_policy |= HCI_LP_PARK;
625
626 link_policy = cpu_to_le16(link_policy);
627 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
628 sizeof(link_policy), &link_policy);
629 }
630
631 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
632 {
633 struct hci_rp_read_local_commands *rp = (void *) skb->data;
634
635 BT_DBG("%s status 0x%x", hdev->name, rp->status);
636
637 if (rp->status)
638 goto done;
639
640 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
641
642 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
643 hci_setup_link_policy(hdev);
644
645 done:
646 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
647 }
648
649 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
650 {
651 struct hci_rp_read_local_features *rp = (void *) skb->data;
652
653 BT_DBG("%s status 0x%x", hdev->name, rp->status);
654
655 if (rp->status)
656 return;
657
658 memcpy(hdev->features, rp->features, 8);
659
660 /* Adjust default settings according to features
661 * supported by device. */
662
663 if (hdev->features[0] & LMP_3SLOT)
664 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
665
666 if (hdev->features[0] & LMP_5SLOT)
667 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
668
669 if (hdev->features[1] & LMP_HV2) {
670 hdev->pkt_type |= (HCI_HV2);
671 hdev->esco_type |= (ESCO_HV2);
672 }
673
674 if (hdev->features[1] & LMP_HV3) {
675 hdev->pkt_type |= (HCI_HV3);
676 hdev->esco_type |= (ESCO_HV3);
677 }
678
679 if (hdev->features[3] & LMP_ESCO)
680 hdev->esco_type |= (ESCO_EV3);
681
682 if (hdev->features[4] & LMP_EV4)
683 hdev->esco_type |= (ESCO_EV4);
684
685 if (hdev->features[4] & LMP_EV5)
686 hdev->esco_type |= (ESCO_EV5);
687
688 if (hdev->features[5] & LMP_EDR_ESCO_2M)
689 hdev->esco_type |= (ESCO_2EV3);
690
691 if (hdev->features[5] & LMP_EDR_ESCO_3M)
692 hdev->esco_type |= (ESCO_3EV3);
693
694 if (hdev->features[5] & LMP_EDR_3S_ESCO)
695 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
696
697 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
698 hdev->features[0], hdev->features[1],
699 hdev->features[2], hdev->features[3],
700 hdev->features[4], hdev->features[5],
701 hdev->features[6], hdev->features[7]);
702 }
703
704 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
705 struct sk_buff *skb)
706 {
707 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
708
709 BT_DBG("%s status 0x%x", hdev->name, rp->status);
710
711 if (rp->status)
712 return;
713
714 switch (rp->page) {
715 case 0:
716 memcpy(hdev->features, rp->features, 8);
717 break;
718 case 1:
719 memcpy(hdev->host_features, rp->features, 8);
720 break;
721 }
722
723 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
724 }
725
726 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
727 struct sk_buff *skb)
728 {
729 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
730
731 BT_DBG("%s status 0x%x", hdev->name, rp->status);
732
733 if (rp->status)
734 return;
735
736 hdev->flow_ctl_mode = rp->mode;
737
738 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
739 }
740
741 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
742 {
743 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
744
745 BT_DBG("%s status 0x%x", hdev->name, rp->status);
746
747 if (rp->status)
748 return;
749
750 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
751 hdev->sco_mtu = rp->sco_mtu;
752 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
753 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
754
755 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
756 hdev->sco_mtu = 64;
757 hdev->sco_pkts = 8;
758 }
759
760 hdev->acl_cnt = hdev->acl_pkts;
761 hdev->sco_cnt = hdev->sco_pkts;
762
763 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
764 hdev->acl_mtu, hdev->acl_pkts,
765 hdev->sco_mtu, hdev->sco_pkts);
766 }
767
768 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
769 {
770 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
771
772 BT_DBG("%s status 0x%x", hdev->name, rp->status);
773
774 if (!rp->status)
775 bacpy(&hdev->bdaddr, &rp->bdaddr);
776
777 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
778 }
779
780 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
781 struct sk_buff *skb)
782 {
783 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
784
785 BT_DBG("%s status 0x%x", hdev->name, rp->status);
786
787 if (rp->status)
788 return;
789
790 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
791 hdev->block_len = __le16_to_cpu(rp->block_len);
792 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
793
794 hdev->block_cnt = hdev->num_blocks;
795
796 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
797 hdev->block_cnt, hdev->block_len);
798
799 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
800 }
801
802 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
803 {
804 __u8 status = *((__u8 *) skb->data);
805
806 BT_DBG("%s status 0x%x", hdev->name, status);
807
808 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
809 }
810
811 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
812 struct sk_buff *skb)
813 {
814 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
815
816 BT_DBG("%s status 0x%x", hdev->name, rp->status);
817
818 if (rp->status)
819 return;
820
821 hdev->amp_status = rp->amp_status;
822 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
823 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
824 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
825 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
826 hdev->amp_type = rp->amp_type;
827 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
828 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
829 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
830 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
831
832 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
833 }
834
835 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
836 struct sk_buff *skb)
837 {
838 __u8 status = *((__u8 *) skb->data);
839
840 BT_DBG("%s status 0x%x", hdev->name, status);
841
842 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
843 }
844
845 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
846 {
847 __u8 status = *((__u8 *) skb->data);
848
849 BT_DBG("%s status 0x%x", hdev->name, status);
850
851 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
852 }
853
854 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
855 struct sk_buff *skb)
856 {
857 __u8 status = *((__u8 *) skb->data);
858
859 BT_DBG("%s status 0x%x", hdev->name, status);
860
861 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
862 }
863
864 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
865 struct sk_buff *skb)
866 {
867 __u8 status = *((__u8 *) skb->data);
868
869 BT_DBG("%s status 0x%x", hdev->name, status);
870
871 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
872 }
873
874 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
875 {
876 __u8 status = *((__u8 *) skb->data);
877
878 BT_DBG("%s status 0x%x", hdev->name, status);
879
880 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
881 }
882
883 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
884 {
885 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
886 struct hci_cp_pin_code_reply *cp;
887 struct hci_conn *conn;
888
889 BT_DBG("%s status 0x%x", hdev->name, rp->status);
890
891 hci_dev_lock(hdev);
892
893 if (test_bit(HCI_MGMT, &hdev->flags))
894 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
895
896 if (rp->status != 0)
897 goto unlock;
898
899 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
900 if (!cp)
901 goto unlock;
902
903 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
904 if (conn)
905 conn->pin_length = cp->pin_len;
906
907 unlock:
908 hci_dev_unlock(hdev);
909 }
910
911 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
912 {
913 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
914
915 BT_DBG("%s status 0x%x", hdev->name, rp->status);
916
917 hci_dev_lock(hdev);
918
919 if (test_bit(HCI_MGMT, &hdev->flags))
920 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
921 rp->status);
922
923 hci_dev_unlock(hdev);
924 }
925
926 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
927 struct sk_buff *skb)
928 {
929 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
930
931 BT_DBG("%s status 0x%x", hdev->name, rp->status);
932
933 if (rp->status)
934 return;
935
936 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
937 hdev->le_pkts = rp->le_max_pkt;
938
939 hdev->le_cnt = hdev->le_pkts;
940
941 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
942
943 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
944 }
945
946 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
947 {
948 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
949
950 BT_DBG("%s status 0x%x", hdev->name, rp->status);
951
952 hci_dev_lock(hdev);
953
954 if (test_bit(HCI_MGMT, &hdev->flags))
955 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
956 rp->status);
957
958 hci_dev_unlock(hdev);
959 }
960
961 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
962 struct sk_buff *skb)
963 {
964 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
965
966 BT_DBG("%s status 0x%x", hdev->name, rp->status);
967
968 hci_dev_lock(hdev);
969
970 if (test_bit(HCI_MGMT, &hdev->flags))
971 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 rp->status);
973
974 hci_dev_unlock(hdev);
975 }
976
977 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
978 {
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
980
981 BT_DBG("%s status 0x%x", hdev->name, rp->status);
982
983 hci_dev_lock(hdev);
984
985 if (test_bit(HCI_MGMT, &hdev->flags))
986 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
987 rp->status);
988
989 hci_dev_unlock(hdev);
990 }
991
992 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
993 struct sk_buff *skb)
994 {
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
996
997 BT_DBG("%s status 0x%x", hdev->name, rp->status);
998
999 hci_dev_lock(hdev);
1000
1001 if (test_bit(HCI_MGMT, &hdev->flags))
1002 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 rp->status);
1004
1005 hci_dev_unlock(hdev);
1006 }
1007
1008 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1010 {
1011 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1012
1013 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1014
1015 hci_dev_lock(hdev);
1016 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1017 rp->randomizer, rp->status);
1018 hci_dev_unlock(hdev);
1019 }
1020
1021 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1022 {
1023 __u8 status = *((__u8 *) skb->data);
1024
1025 BT_DBG("%s status 0x%x", hdev->name, status);
1026 }
1027
1028 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1029 struct sk_buff *skb)
1030 {
1031 struct hci_cp_le_set_scan_enable *cp;
1032 __u8 status = *((__u8 *) skb->data);
1033
1034 BT_DBG("%s status 0x%x", hdev->name, status);
1035
1036 if (status)
1037 return;
1038
1039 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1040 if (!cp)
1041 return;
1042
1043 switch (cp->enable) {
1044 case LE_SCANNING_ENABLED:
1045 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1046
1047 cancel_delayed_work_sync(&hdev->adv_work);
1048
1049 hci_dev_lock(hdev);
1050 hci_adv_entries_clear(hdev);
1051 hci_dev_unlock(hdev);
1052 break;
1053
1054 case LE_SCANNING_DISABLED:
1055 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1056
1057 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1058 break;
1059
1060 default:
1061 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1062 break;
1063 }
1064 }
1065
1066 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1067 {
1068 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1069
1070 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1071
1072 if (rp->status)
1073 return;
1074
1075 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1076 }
1077
1078 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1079 {
1080 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1081
1082 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1083
1084 if (rp->status)
1085 return;
1086
1087 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1088 }
1089
1090 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1091 struct sk_buff *skb)
1092 {
1093 struct hci_cp_read_local_ext_features cp;
1094 __u8 status = *((__u8 *) skb->data);
1095
1096 BT_DBG("%s status 0x%x", hdev->name, status);
1097
1098 if (status)
1099 return;
1100
1101 cp.page = 0x01;
1102 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1103 }
1104
1105 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1106 {
1107 BT_DBG("%s status 0x%x", hdev->name, status);
1108
1109 if (status) {
1110 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1111 hci_conn_check_pending(hdev);
1112 hci_dev_lock(hdev);
1113 if (test_bit(HCI_MGMT, &hdev->flags))
1114 mgmt_start_discovery_failed(hdev, status);
1115 hci_dev_unlock(hdev);
1116 return;
1117 }
1118
1119 set_bit(HCI_INQUIRY, &hdev->flags);
1120
1121 hci_dev_lock(hdev);
1122 mgmt_discovering(hdev, 1);
1123 hci_dev_unlock(hdev);
1124 }
1125
1126 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1127 {
1128 struct hci_cp_create_conn *cp;
1129 struct hci_conn *conn;
1130
1131 BT_DBG("%s status 0x%x", hdev->name, status);
1132
1133 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1134 if (!cp)
1135 return;
1136
1137 hci_dev_lock(hdev);
1138
1139 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1140
1141 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1142
1143 if (status) {
1144 if (conn && conn->state == BT_CONNECT) {
1145 if (status != 0x0c || conn->attempt > 2) {
1146 conn->state = BT_CLOSED;
1147 hci_proto_connect_cfm(conn, status);
1148 hci_conn_del(conn);
1149 } else
1150 conn->state = BT_CONNECT2;
1151 }
1152 } else {
1153 if (!conn) {
1154 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1155 if (conn) {
1156 conn->out = 1;
1157 conn->link_mode |= HCI_LM_MASTER;
1158 } else
1159 BT_ERR("No memory for new connection");
1160 }
1161 }
1162
1163 hci_dev_unlock(hdev);
1164 }
1165
1166 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1167 {
1168 struct hci_cp_add_sco *cp;
1169 struct hci_conn *acl, *sco;
1170 __u16 handle;
1171
1172 BT_DBG("%s status 0x%x", hdev->name, status);
1173
1174 if (!status)
1175 return;
1176
1177 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1178 if (!cp)
1179 return;
1180
1181 handle = __le16_to_cpu(cp->handle);
1182
1183 BT_DBG("%s handle %d", hdev->name, handle);
1184
1185 hci_dev_lock(hdev);
1186
1187 acl = hci_conn_hash_lookup_handle(hdev, handle);
1188 if (acl) {
1189 sco = acl->link;
1190 if (sco) {
1191 sco->state = BT_CLOSED;
1192
1193 hci_proto_connect_cfm(sco, status);
1194 hci_conn_del(sco);
1195 }
1196 }
1197
1198 hci_dev_unlock(hdev);
1199 }
1200
1201 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1202 {
1203 struct hci_cp_auth_requested *cp;
1204 struct hci_conn *conn;
1205
1206 BT_DBG("%s status 0x%x", hdev->name, status);
1207
1208 if (!status)
1209 return;
1210
1211 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1212 if (!cp)
1213 return;
1214
1215 hci_dev_lock(hdev);
1216
1217 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1218 if (conn) {
1219 if (conn->state == BT_CONFIG) {
1220 hci_proto_connect_cfm(conn, status);
1221 hci_conn_put(conn);
1222 }
1223 }
1224
1225 hci_dev_unlock(hdev);
1226 }
1227
1228 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1229 {
1230 struct hci_cp_set_conn_encrypt *cp;
1231 struct hci_conn *conn;
1232
1233 BT_DBG("%s status 0x%x", hdev->name, status);
1234
1235 if (!status)
1236 return;
1237
1238 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1239 if (!cp)
1240 return;
1241
1242 hci_dev_lock(hdev);
1243
1244 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1245 if (conn) {
1246 if (conn->state == BT_CONFIG) {
1247 hci_proto_connect_cfm(conn, status);
1248 hci_conn_put(conn);
1249 }
1250 }
1251
1252 hci_dev_unlock(hdev);
1253 }
1254
1255 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1256 struct hci_conn *conn)
1257 {
1258 if (conn->state != BT_CONFIG || !conn->out)
1259 return 0;
1260
1261 if (conn->pending_sec_level == BT_SECURITY_SDP)
1262 return 0;
1263
1264 /* Only request authentication for SSP connections or non-SSP
1265 * devices with sec_level HIGH or if MITM protection is requested */
1266 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1267 conn->pending_sec_level != BT_SECURITY_HIGH &&
1268 !(conn->auth_type & 0x01))
1269 return 0;
1270
1271 return 1;
1272 }
1273
1274 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1275 {
1276 struct hci_cp_remote_name_req *cp;
1277 struct hci_conn *conn;
1278
1279 BT_DBG("%s status 0x%x", hdev->name, status);
1280
1281 /* If successful wait for the name req complete event before
1282 * checking for the need to do authentication */
1283 if (!status)
1284 return;
1285
1286 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1287 if (!cp)
1288 return;
1289
1290 hci_dev_lock(hdev);
1291
1292 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1293 if (!conn)
1294 goto unlock;
1295
1296 if (!hci_outgoing_auth_needed(hdev, conn))
1297 goto unlock;
1298
1299 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1300 struct hci_cp_auth_requested cp;
1301 cp.handle = __cpu_to_le16(conn->handle);
1302 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1303 }
1304
1305 unlock:
1306 hci_dev_unlock(hdev);
1307 }
1308
1309 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1310 {
1311 struct hci_cp_read_remote_features *cp;
1312 struct hci_conn *conn;
1313
1314 BT_DBG("%s status 0x%x", hdev->name, status);
1315
1316 if (!status)
1317 return;
1318
1319 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1320 if (!cp)
1321 return;
1322
1323 hci_dev_lock(hdev);
1324
1325 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1326 if (conn) {
1327 if (conn->state == BT_CONFIG) {
1328 hci_proto_connect_cfm(conn, status);
1329 hci_conn_put(conn);
1330 }
1331 }
1332
1333 hci_dev_unlock(hdev);
1334 }
1335
1336 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1337 {
1338 struct hci_cp_read_remote_ext_features *cp;
1339 struct hci_conn *conn;
1340
1341 BT_DBG("%s status 0x%x", hdev->name, status);
1342
1343 if (!status)
1344 return;
1345
1346 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1347 if (!cp)
1348 return;
1349
1350 hci_dev_lock(hdev);
1351
1352 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1353 if (conn) {
1354 if (conn->state == BT_CONFIG) {
1355 hci_proto_connect_cfm(conn, status);
1356 hci_conn_put(conn);
1357 }
1358 }
1359
1360 hci_dev_unlock(hdev);
1361 }
1362
1363 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1364 {
1365 struct hci_cp_setup_sync_conn *cp;
1366 struct hci_conn *acl, *sco;
1367 __u16 handle;
1368
1369 BT_DBG("%s status 0x%x", hdev->name, status);
1370
1371 if (!status)
1372 return;
1373
1374 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1375 if (!cp)
1376 return;
1377
1378 handle = __le16_to_cpu(cp->handle);
1379
1380 BT_DBG("%s handle %d", hdev->name, handle);
1381
1382 hci_dev_lock(hdev);
1383
1384 acl = hci_conn_hash_lookup_handle(hdev, handle);
1385 if (acl) {
1386 sco = acl->link;
1387 if (sco) {
1388 sco->state = BT_CLOSED;
1389
1390 hci_proto_connect_cfm(sco, status);
1391 hci_conn_del(sco);
1392 }
1393 }
1394
1395 hci_dev_unlock(hdev);
1396 }
1397
1398 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1399 {
1400 struct hci_cp_sniff_mode *cp;
1401 struct hci_conn *conn;
1402
1403 BT_DBG("%s status 0x%x", hdev->name, status);
1404
1405 if (!status)
1406 return;
1407
1408 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1409 if (!cp)
1410 return;
1411
1412 hci_dev_lock(hdev);
1413
1414 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1415 if (conn) {
1416 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1417
1418 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1419 hci_sco_setup(conn, status);
1420 }
1421
1422 hci_dev_unlock(hdev);
1423 }
1424
1425 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1426 {
1427 struct hci_cp_exit_sniff_mode *cp;
1428 struct hci_conn *conn;
1429
1430 BT_DBG("%s status 0x%x", hdev->name, status);
1431
1432 if (!status)
1433 return;
1434
1435 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1436 if (!cp)
1437 return;
1438
1439 hci_dev_lock(hdev);
1440
1441 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1442 if (conn) {
1443 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1444
1445 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1446 hci_sco_setup(conn, status);
1447 }
1448
1449 hci_dev_unlock(hdev);
1450 }
1451
1452 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1453 {
1454 struct hci_cp_le_create_conn *cp;
1455 struct hci_conn *conn;
1456
1457 BT_DBG("%s status 0x%x", hdev->name, status);
1458
1459 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1460 if (!cp)
1461 return;
1462
1463 hci_dev_lock(hdev);
1464
1465 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1466
1467 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1468 conn);
1469
1470 if (status) {
1471 if (conn && conn->state == BT_CONNECT) {
1472 conn->state = BT_CLOSED;
1473 hci_proto_connect_cfm(conn, status);
1474 hci_conn_del(conn);
1475 }
1476 } else {
1477 if (!conn) {
1478 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1479 if (conn) {
1480 conn->dst_type = cp->peer_addr_type;
1481 conn->out = 1;
1482 } else {
1483 BT_ERR("No memory for new connection");
1484 }
1485 }
1486 }
1487
1488 hci_dev_unlock(hdev);
1489 }
1490
1491 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1492 {
1493 BT_DBG("%s status 0x%x", hdev->name, status);
1494 }
1495
1496 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1497 {
1498 __u8 status = *((__u8 *) skb->data);
1499
1500 BT_DBG("%s status %d", hdev->name, status);
1501
1502 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1503
1504 hci_conn_check_pending(hdev);
1505
1506 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1507 return;
1508
1509 hci_dev_lock(hdev);
1510 mgmt_discovering(hdev, 0);
1511 hci_dev_unlock(hdev);
1512 }
1513
1514 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1515 {
1516 struct inquiry_data data;
1517 struct inquiry_info *info = (void *) (skb->data + 1);
1518 int num_rsp = *((__u8 *) skb->data);
1519
1520 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1521
1522 if (!num_rsp)
1523 return;
1524
1525 hci_dev_lock(hdev);
1526
1527 for (; num_rsp; num_rsp--, info++) {
1528 bool name_known;
1529
1530 bacpy(&data.bdaddr, &info->bdaddr);
1531 data.pscan_rep_mode = info->pscan_rep_mode;
1532 data.pscan_period_mode = info->pscan_period_mode;
1533 data.pscan_mode = info->pscan_mode;
1534 memcpy(data.dev_class, info->dev_class, 3);
1535 data.clock_offset = info->clock_offset;
1536 data.rssi = 0x00;
1537 data.ssp_mode = 0x00;
1538
1539 name_known = hci_inquiry_cache_update(hdev, &data, false);
1540 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1541 info->dev_class, 0, !name_known, NULL);
1542 }
1543
1544 hci_dev_unlock(hdev);
1545 }
1546
1547 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1548 {
1549 struct hci_ev_conn_complete *ev = (void *) skb->data;
1550 struct hci_conn *conn;
1551
1552 BT_DBG("%s", hdev->name);
1553
1554 hci_dev_lock(hdev);
1555
1556 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1557 if (!conn) {
1558 if (ev->link_type != SCO_LINK)
1559 goto unlock;
1560
1561 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1562 if (!conn)
1563 goto unlock;
1564
1565 conn->type = SCO_LINK;
1566 }
1567
1568 if (!ev->status) {
1569 conn->handle = __le16_to_cpu(ev->handle);
1570
1571 if (conn->type == ACL_LINK) {
1572 conn->state = BT_CONFIG;
1573 hci_conn_hold(conn);
1574 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1575 mgmt_connected(hdev, &ev->bdaddr, conn->type,
1576 conn->dst_type);
1577 } else
1578 conn->state = BT_CONNECTED;
1579
1580 hci_conn_hold_device(conn);
1581 hci_conn_add_sysfs(conn);
1582
1583 if (test_bit(HCI_AUTH, &hdev->flags))
1584 conn->link_mode |= HCI_LM_AUTH;
1585
1586 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1587 conn->link_mode |= HCI_LM_ENCRYPT;
1588
1589 /* Get remote features */
1590 if (conn->type == ACL_LINK) {
1591 struct hci_cp_read_remote_features cp;
1592 cp.handle = ev->handle;
1593 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1594 sizeof(cp), &cp);
1595 }
1596
1597 /* Set packet type for incoming connection */
1598 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1599 struct hci_cp_change_conn_ptype cp;
1600 cp.handle = ev->handle;
1601 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1602 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1603 sizeof(cp), &cp);
1604 }
1605 } else {
1606 conn->state = BT_CLOSED;
1607 if (conn->type == ACL_LINK)
1608 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1609 conn->dst_type, ev->status);
1610 }
1611
1612 if (conn->type == ACL_LINK)
1613 hci_sco_setup(conn, ev->status);
1614
1615 if (ev->status) {
1616 hci_proto_connect_cfm(conn, ev->status);
1617 hci_conn_del(conn);
1618 } else if (ev->link_type != ACL_LINK)
1619 hci_proto_connect_cfm(conn, ev->status);
1620
1621 unlock:
1622 hci_dev_unlock(hdev);
1623
1624 hci_conn_check_pending(hdev);
1625 }
1626
1627 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1628 {
1629 struct hci_ev_conn_request *ev = (void *) skb->data;
1630 int mask = hdev->link_mode;
1631
1632 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1633 batostr(&ev->bdaddr), ev->link_type);
1634
1635 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1636
1637 if ((mask & HCI_LM_ACCEPT) &&
1638 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1639 /* Connection accepted */
1640 struct inquiry_entry *ie;
1641 struct hci_conn *conn;
1642
1643 hci_dev_lock(hdev);
1644
1645 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1646 if (ie)
1647 memcpy(ie->data.dev_class, ev->dev_class, 3);
1648
1649 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1650 if (!conn) {
1651 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1652 if (!conn) {
1653 BT_ERR("No memory for new connection");
1654 hci_dev_unlock(hdev);
1655 return;
1656 }
1657 }
1658
1659 memcpy(conn->dev_class, ev->dev_class, 3);
1660 conn->state = BT_CONNECT;
1661
1662 hci_dev_unlock(hdev);
1663
1664 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1665 struct hci_cp_accept_conn_req cp;
1666
1667 bacpy(&cp.bdaddr, &ev->bdaddr);
1668
1669 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1670 cp.role = 0x00; /* Become master */
1671 else
1672 cp.role = 0x01; /* Remain slave */
1673
1674 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1675 sizeof(cp), &cp);
1676 } else {
1677 struct hci_cp_accept_sync_conn_req cp;
1678
1679 bacpy(&cp.bdaddr, &ev->bdaddr);
1680 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1681
1682 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1683 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1684 cp.max_latency = cpu_to_le16(0xffff);
1685 cp.content_format = cpu_to_le16(hdev->voice_setting);
1686 cp.retrans_effort = 0xff;
1687
1688 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1689 sizeof(cp), &cp);
1690 }
1691 } else {
1692 /* Connection rejected */
1693 struct hci_cp_reject_conn_req cp;
1694
1695 bacpy(&cp.bdaddr, &ev->bdaddr);
1696 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1697 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1698 }
1699 }
1700
1701 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1702 {
1703 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1704 struct hci_conn *conn;
1705
1706 BT_DBG("%s status %d", hdev->name, ev->status);
1707
1708 hci_dev_lock(hdev);
1709
1710 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1711 if (!conn)
1712 goto unlock;
1713
1714 if (ev->status == 0)
1715 conn->state = BT_CLOSED;
1716
1717 if (conn->type == ACL_LINK || conn->type == LE_LINK) {
1718 if (ev->status != 0)
1719 mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
1720 else
1721 mgmt_disconnected(hdev, &conn->dst, conn->type,
1722 conn->dst_type);
1723 }
1724
1725 if (ev->status == 0) {
1726 hci_proto_disconn_cfm(conn, ev->reason);
1727 hci_conn_del(conn);
1728 }
1729
1730 unlock:
1731 hci_dev_unlock(hdev);
1732 }
1733
1734 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1735 {
1736 struct hci_ev_auth_complete *ev = (void *) skb->data;
1737 struct hci_conn *conn;
1738
1739 BT_DBG("%s status %d", hdev->name, ev->status);
1740
1741 hci_dev_lock(hdev);
1742
1743 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1744 if (!conn)
1745 goto unlock;
1746
1747 if (!ev->status) {
1748 if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
1749 test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
1750 BT_INFO("re-auth of legacy device is not possible.");
1751 } else {
1752 conn->link_mode |= HCI_LM_AUTH;
1753 conn->sec_level = conn->pending_sec_level;
1754 }
1755 } else {
1756 mgmt_auth_failed(hdev, &conn->dst, ev->status);
1757 }
1758
1759 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1760 clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
1761
1762 if (conn->state == BT_CONFIG) {
1763 if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
1764 struct hci_cp_set_conn_encrypt cp;
1765 cp.handle = ev->handle;
1766 cp.encrypt = 0x01;
1767 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1768 &cp);
1769 } else {
1770 conn->state = BT_CONNECTED;
1771 hci_proto_connect_cfm(conn, ev->status);
1772 hci_conn_put(conn);
1773 }
1774 } else {
1775 hci_auth_cfm(conn, ev->status);
1776
1777 hci_conn_hold(conn);
1778 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1779 hci_conn_put(conn);
1780 }
1781
1782 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1783 if (!ev->status) {
1784 struct hci_cp_set_conn_encrypt cp;
1785 cp.handle = ev->handle;
1786 cp.encrypt = 0x01;
1787 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1788 &cp);
1789 } else {
1790 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1791 hci_encrypt_cfm(conn, ev->status, 0x00);
1792 }
1793 }
1794
1795 unlock:
1796 hci_dev_unlock(hdev);
1797 }
1798
1799 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1800 {
1801 struct hci_ev_remote_name *ev = (void *) skb->data;
1802 struct hci_conn *conn;
1803
1804 BT_DBG("%s", hdev->name);
1805
1806 hci_conn_check_pending(hdev);
1807
1808 hci_dev_lock(hdev);
1809
1810 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1811 mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
1812
1813 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1814 if (!conn)
1815 goto unlock;
1816
1817 if (!hci_outgoing_auth_needed(hdev, conn))
1818 goto unlock;
1819
1820 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1821 struct hci_cp_auth_requested cp;
1822 cp.handle = __cpu_to_le16(conn->handle);
1823 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1824 }
1825
1826 unlock:
1827 hci_dev_unlock(hdev);
1828 }
1829
1830 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1831 {
1832 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1833 struct hci_conn *conn;
1834
1835 BT_DBG("%s status %d", hdev->name, ev->status);
1836
1837 hci_dev_lock(hdev);
1838
1839 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1840 if (conn) {
1841 if (!ev->status) {
1842 if (ev->encrypt) {
1843 /* Encryption implies authentication */
1844 conn->link_mode |= HCI_LM_AUTH;
1845 conn->link_mode |= HCI_LM_ENCRYPT;
1846 conn->sec_level = conn->pending_sec_level;
1847 } else
1848 conn->link_mode &= ~HCI_LM_ENCRYPT;
1849 }
1850
1851 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1852
1853 if (conn->state == BT_CONFIG) {
1854 if (!ev->status)
1855 conn->state = BT_CONNECTED;
1856
1857 hci_proto_connect_cfm(conn, ev->status);
1858 hci_conn_put(conn);
1859 } else
1860 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1861 }
1862
1863 hci_dev_unlock(hdev);
1864 }
1865
1866 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1867 {
1868 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1869 struct hci_conn *conn;
1870
1871 BT_DBG("%s status %d", hdev->name, ev->status);
1872
1873 hci_dev_lock(hdev);
1874
1875 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1876 if (conn) {
1877 if (!ev->status)
1878 conn->link_mode |= HCI_LM_SECURE;
1879
1880 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1881
1882 hci_key_change_cfm(conn, ev->status);
1883 }
1884
1885 hci_dev_unlock(hdev);
1886 }
1887
1888 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1889 {
1890 struct hci_ev_remote_features *ev = (void *) skb->data;
1891 struct hci_conn *conn;
1892
1893 BT_DBG("%s status %d", hdev->name, ev->status);
1894
1895 hci_dev_lock(hdev);
1896
1897 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1898 if (!conn)
1899 goto unlock;
1900
1901 if (!ev->status)
1902 memcpy(conn->features, ev->features, 8);
1903
1904 if (conn->state != BT_CONFIG)
1905 goto unlock;
1906
1907 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1908 struct hci_cp_read_remote_ext_features cp;
1909 cp.handle = ev->handle;
1910 cp.page = 0x01;
1911 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1912 sizeof(cp), &cp);
1913 goto unlock;
1914 }
1915
1916 if (!ev->status) {
1917 struct hci_cp_remote_name_req cp;
1918 memset(&cp, 0, sizeof(cp));
1919 bacpy(&cp.bdaddr, &conn->dst);
1920 cp.pscan_rep_mode = 0x02;
1921 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1922 }
1923
1924 if (!hci_outgoing_auth_needed(hdev, conn)) {
1925 conn->state = BT_CONNECTED;
1926 hci_proto_connect_cfm(conn, ev->status);
1927 hci_conn_put(conn);
1928 }
1929
1930 unlock:
1931 hci_dev_unlock(hdev);
1932 }
1933
1934 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1935 {
1936 BT_DBG("%s", hdev->name);
1937 }
1938
1939 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1940 {
1941 BT_DBG("%s", hdev->name);
1942 }
1943
1944 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1945 {
1946 struct hci_ev_cmd_complete *ev = (void *) skb->data;
1947 __u16 opcode;
1948
1949 skb_pull(skb, sizeof(*ev));
1950
1951 opcode = __le16_to_cpu(ev->opcode);
1952
1953 switch (opcode) {
1954 case HCI_OP_INQUIRY_CANCEL:
1955 hci_cc_inquiry_cancel(hdev, skb);
1956 break;
1957
1958 case HCI_OP_EXIT_PERIODIC_INQ:
1959 hci_cc_exit_periodic_inq(hdev, skb);
1960 break;
1961
1962 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1963 hci_cc_remote_name_req_cancel(hdev, skb);
1964 break;
1965
1966 case HCI_OP_ROLE_DISCOVERY:
1967 hci_cc_role_discovery(hdev, skb);
1968 break;
1969
1970 case HCI_OP_READ_LINK_POLICY:
1971 hci_cc_read_link_policy(hdev, skb);
1972 break;
1973
1974 case HCI_OP_WRITE_LINK_POLICY:
1975 hci_cc_write_link_policy(hdev, skb);
1976 break;
1977
1978 case HCI_OP_READ_DEF_LINK_POLICY:
1979 hci_cc_read_def_link_policy(hdev, skb);
1980 break;
1981
1982 case HCI_OP_WRITE_DEF_LINK_POLICY:
1983 hci_cc_write_def_link_policy(hdev, skb);
1984 break;
1985
1986 case HCI_OP_RESET:
1987 hci_cc_reset(hdev, skb);
1988 break;
1989
1990 case HCI_OP_WRITE_LOCAL_NAME:
1991 hci_cc_write_local_name(hdev, skb);
1992 break;
1993
1994 case HCI_OP_READ_LOCAL_NAME:
1995 hci_cc_read_local_name(hdev, skb);
1996 break;
1997
1998 case HCI_OP_WRITE_AUTH_ENABLE:
1999 hci_cc_write_auth_enable(hdev, skb);
2000 break;
2001
2002 case HCI_OP_WRITE_ENCRYPT_MODE:
2003 hci_cc_write_encrypt_mode(hdev, skb);
2004 break;
2005
2006 case HCI_OP_WRITE_SCAN_ENABLE:
2007 hci_cc_write_scan_enable(hdev, skb);
2008 break;
2009
2010 case HCI_OP_READ_CLASS_OF_DEV:
2011 hci_cc_read_class_of_dev(hdev, skb);
2012 break;
2013
2014 case HCI_OP_WRITE_CLASS_OF_DEV:
2015 hci_cc_write_class_of_dev(hdev, skb);
2016 break;
2017
2018 case HCI_OP_READ_VOICE_SETTING:
2019 hci_cc_read_voice_setting(hdev, skb);
2020 break;
2021
2022 case HCI_OP_WRITE_VOICE_SETTING:
2023 hci_cc_write_voice_setting(hdev, skb);
2024 break;
2025
2026 case HCI_OP_HOST_BUFFER_SIZE:
2027 hci_cc_host_buffer_size(hdev, skb);
2028 break;
2029
2030 case HCI_OP_READ_SSP_MODE:
2031 hci_cc_read_ssp_mode(hdev, skb);
2032 break;
2033
2034 case HCI_OP_WRITE_SSP_MODE:
2035 hci_cc_write_ssp_mode(hdev, skb);
2036 break;
2037
2038 case HCI_OP_READ_LOCAL_VERSION:
2039 hci_cc_read_local_version(hdev, skb);
2040 break;
2041
2042 case HCI_OP_READ_LOCAL_COMMANDS:
2043 hci_cc_read_local_commands(hdev, skb);
2044 break;
2045
2046 case HCI_OP_READ_LOCAL_FEATURES:
2047 hci_cc_read_local_features(hdev, skb);
2048 break;
2049
2050 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2051 hci_cc_read_local_ext_features(hdev, skb);
2052 break;
2053
2054 case HCI_OP_READ_BUFFER_SIZE:
2055 hci_cc_read_buffer_size(hdev, skb);
2056 break;
2057
2058 case HCI_OP_READ_BD_ADDR:
2059 hci_cc_read_bd_addr(hdev, skb);
2060 break;
2061
2062 case HCI_OP_READ_DATA_BLOCK_SIZE:
2063 hci_cc_read_data_block_size(hdev, skb);
2064 break;
2065
2066 case HCI_OP_WRITE_CA_TIMEOUT:
2067 hci_cc_write_ca_timeout(hdev, skb);
2068 break;
2069
2070 case HCI_OP_READ_FLOW_CONTROL_MODE:
2071 hci_cc_read_flow_control_mode(hdev, skb);
2072 break;
2073
2074 case HCI_OP_READ_LOCAL_AMP_INFO:
2075 hci_cc_read_local_amp_info(hdev, skb);
2076 break;
2077
2078 case HCI_OP_DELETE_STORED_LINK_KEY:
2079 hci_cc_delete_stored_link_key(hdev, skb);
2080 break;
2081
2082 case HCI_OP_SET_EVENT_MASK:
2083 hci_cc_set_event_mask(hdev, skb);
2084 break;
2085
2086 case HCI_OP_WRITE_INQUIRY_MODE:
2087 hci_cc_write_inquiry_mode(hdev, skb);
2088 break;
2089
2090 case HCI_OP_READ_INQ_RSP_TX_POWER:
2091 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2092 break;
2093
2094 case HCI_OP_SET_EVENT_FLT:
2095 hci_cc_set_event_flt(hdev, skb);
2096 break;
2097
2098 case HCI_OP_PIN_CODE_REPLY:
2099 hci_cc_pin_code_reply(hdev, skb);
2100 break;
2101
2102 case HCI_OP_PIN_CODE_NEG_REPLY:
2103 hci_cc_pin_code_neg_reply(hdev, skb);
2104 break;
2105
2106 case HCI_OP_READ_LOCAL_OOB_DATA:
2107 hci_cc_read_local_oob_data_reply(hdev, skb);
2108 break;
2109
2110 case HCI_OP_LE_READ_BUFFER_SIZE:
2111 hci_cc_le_read_buffer_size(hdev, skb);
2112 break;
2113
2114 case HCI_OP_USER_CONFIRM_REPLY:
2115 hci_cc_user_confirm_reply(hdev, skb);
2116 break;
2117
2118 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2119 hci_cc_user_confirm_neg_reply(hdev, skb);
2120 break;
2121
2122 case HCI_OP_USER_PASSKEY_REPLY:
2123 hci_cc_user_passkey_reply(hdev, skb);
2124 break;
2125
2126 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2127 hci_cc_user_passkey_neg_reply(hdev, skb);
break;
2128
2129 case HCI_OP_LE_SET_SCAN_PARAM:
2130 hci_cc_le_set_scan_param(hdev, skb);
2131 break;
2132
2133 case HCI_OP_LE_SET_SCAN_ENABLE:
2134 hci_cc_le_set_scan_enable(hdev, skb);
2135 break;
2136
2137 case HCI_OP_LE_LTK_REPLY:
2138 hci_cc_le_ltk_reply(hdev, skb);
2139 break;
2140
2141 case HCI_OP_LE_LTK_NEG_REPLY:
2142 hci_cc_le_ltk_neg_reply(hdev, skb);
2143 break;
2144
2145 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2146 hci_cc_write_le_host_supported(hdev, skb);
2147 break;
2148
2149 default:
2150 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2151 break;
2152 }
2153
2154 if (ev->opcode != HCI_OP_NOP)
2155 del_timer(&hdev->cmd_timer);
2156
2157 if (ev->ncmd) {
2158 atomic_set(&hdev->cmd_cnt, 1);
2159 if (!skb_queue_empty(&hdev->cmd_q))
2160 queue_work(hdev->workqueue, &hdev->cmd_work);
2161 }
2162 }
2163
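/* Command Status event: dispatch to the hci_cs_* handlers for commands
 * that only return a status, then restart the command queue if the
 * controller indicated it can accept further commands. */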
2164 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2165 {
2166 struct hci_ev_cmd_status *ev = (void *) skb->data;
2167 __u16 opcode;
2168
2169 skb_pull(skb, sizeof(*ev));
2170
2171 opcode = __le16_to_cpu(ev->opcode);
2172
2173 switch (opcode) {
2174 case HCI_OP_INQUIRY:
2175 hci_cs_inquiry(hdev, ev->status);
2176 break;
2177
2178 case HCI_OP_CREATE_CONN:
2179 hci_cs_create_conn(hdev, ev->status);
2180 break;
2181
2182 case HCI_OP_ADD_SCO:
2183 hci_cs_add_sco(hdev, ev->status);
2184 break;
2185
2186 case HCI_OP_AUTH_REQUESTED:
2187 hci_cs_auth_requested(hdev, ev->status);
2188 break;
2189
2190 case HCI_OP_SET_CONN_ENCRYPT:
2191 hci_cs_set_conn_encrypt(hdev, ev->status);
2192 break;
2193
2194 case HCI_OP_REMOTE_NAME_REQ:
2195 hci_cs_remote_name_req(hdev, ev->status);
2196 break;
2197
2198 case HCI_OP_READ_REMOTE_FEATURES:
2199 hci_cs_read_remote_features(hdev, ev->status);
2200 break;
2201
2202 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2203 hci_cs_read_remote_ext_features(hdev, ev->status);
2204 break;
2205
2206 case HCI_OP_SETUP_SYNC_CONN:
2207 hci_cs_setup_sync_conn(hdev, ev->status);
2208 break;
2209
2210 case HCI_OP_SNIFF_MODE:
2211 hci_cs_sniff_mode(hdev, ev->status);
2212 break;
2213
2214 case HCI_OP_EXIT_SNIFF_MODE:
2215 hci_cs_exit_sniff_mode(hdev, ev->status);
2216 break;
2217
2218 case HCI_OP_DISCONNECT:
2219 if (ev->status != 0)
2220 mgmt_disconnect_failed(hdev, NULL, ev->status);
2221 break;
2222
2223 case HCI_OP_LE_CREATE_CONN:
2224 hci_cs_le_create_conn(hdev, ev->status);
2225 break;
2226
2227 case HCI_OP_LE_START_ENC:
2228 hci_cs_le_start_enc(hdev, ev->status);
2229 break;
2230
2231 default:
2232 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2233 break;
2234 }
2235
2236 if (ev->opcode != HCI_OP_NOP)
2237 del_timer(&hdev->cmd_timer);
2238
2239 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2240 atomic_set(&hdev->cmd_cnt, 1);
2241 if (!skb_queue_empty(&hdev->cmd_q))
2242 queue_work(hdev->workqueue, &hdev->cmd_work);
2243 }
2244 }
2245
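/* Role Change event: update the connection's master/slave link mode,
 * clear the pending role-switch flag and confirm the switch to the
 * upper layers. */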
2246 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2247 {
2248 struct hci_ev_role_change *ev = (void *) skb->data;
2249 struct hci_conn *conn;
2250
2251 BT_DBG("%s status %d", hdev->name, ev->status);
2252
2253 hci_dev_lock(hdev);
2254
2255 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2256 if (conn) {
2257 if (!ev->status) {
2258 if (ev->role)
2259 conn->link_mode &= ~HCI_LM_MASTER;
2260 else
2261 conn->link_mode |= HCI_LM_MASTER;
2262 }
2263
2264 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
2265
2266 hci_role_switch_cfm(conn, ev->status, ev->role);
2267 }
2268
2269 hci_dev_unlock(hdev);
2270 }
2271
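/* Number of Completed Packets event (packet-based flow control only):
 * return transmit credits to the per-link-type counters for each
 * reported handle and reschedule the TX work. */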
2272 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2273 {
2274 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2275 int i;
2276
2277 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2278 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2279 return;
2280 }
2281
2282 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2283 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2284 BT_DBG("%s bad parameters", hdev->name);
2285 return;
2286 }
2287
2288 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2289
2290 for (i = 0; i < ev->num_hndl; i++) {
2291 struct hci_comp_pkts_info *info = &ev->handles[i];
2292 struct hci_conn *conn;
2293 __u16 handle, count;
2294
2295 handle = __le16_to_cpu(info->handle);
2296 count = __le16_to_cpu(info->count);
2297
2298 conn = hci_conn_hash_lookup_handle(hdev, handle);
2299 if (!conn)
2300 continue;
2301
2302 conn->sent -= count;
2303
2304 switch (conn->type) {
2305 case ACL_LINK:
2306 hdev->acl_cnt += count;
2307 if (hdev->acl_cnt > hdev->acl_pkts)
2308 hdev->acl_cnt = hdev->acl_pkts;
2309 break;
2310
2311 case LE_LINK:
2312 if (hdev->le_pkts) {
2313 hdev->le_cnt += count;
2314 if (hdev->le_cnt > hdev->le_pkts)
2315 hdev->le_cnt = hdev->le_pkts;
2316 } else {
2317 hdev->acl_cnt += count;
2318 if (hdev->acl_cnt > hdev->acl_pkts)
2319 hdev->acl_cnt = hdev->acl_pkts;
2320 }
2321 break;
2322
2323 case SCO_LINK:
2324 hdev->sco_cnt += count;
2325 if (hdev->sco_cnt > hdev->sco_pkts)
2326 hdev->sco_cnt = hdev->sco_pkts;
2327 break;
2328
2329 default:
2330 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2331 break;
2332 }
2333 }
2334
2335 queue_work(hdev->workqueue, &hdev->tx_work);
2336 }
2337
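/* Mode Change event: record the new active/sniff mode and interval,
 * update the power-save state for remotely initiated changes and
 * finish any SCO setup that was waiting for the mode change. */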
2338 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2339 {
2340 struct hci_ev_mode_change *ev = (void *) skb->data;
2341 struct hci_conn *conn;
2342
2343 BT_DBG("%s status %d", hdev->name, ev->status);
2344
2345 hci_dev_lock(hdev);
2346
2347 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2348 if (conn) {
2349 conn->mode = ev->mode;
2350 conn->interval = __le16_to_cpu(ev->interval);
2351
2352 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
2353 if (conn->mode == HCI_CM_ACTIVE)
2354 conn->power_save = 1;
2355 else
2356 conn->power_save = 0;
2357 }
2358
2359 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
2360 hci_sco_setup(conn, ev->status);
2361 }
2362
2363 hci_dev_unlock(hdev);
2364 }
2365
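/* PIN Code Request event (legacy pairing): reject automatically when
 * the adapter is not pairable, otherwise forward the request to the
 * management interface so user space can supply the PIN. */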
2366 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2367 {
2368 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2369 struct hci_conn *conn;
2370
2371 BT_DBG("%s", hdev->name);
2372
2373 hci_dev_lock(hdev);
2374
2375 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2376 if (!conn)
2377 goto unlock;
2378
2379 if (conn->state == BT_CONNECTED) {
2380 hci_conn_hold(conn);
2381 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2382 hci_conn_put(conn);
2383 }
2384
2385 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
2386 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2387 sizeof(ev->bdaddr), &ev->bdaddr);
2388 else if (test_bit(HCI_MGMT, &hdev->flags)) {
2389 u8 secure;
2390
2391 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2392 secure = 1;
2393 else
2394 secure = 0;
2395
2396 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2397 }
2398
2399 unlock:
2400 hci_dev_unlock(hdev);
2401 }
2402
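/* Link Key Request event: look up a stored link key for the peer and
 * reply with it, unless the key type is unsuitable for the pending
 * security level (debug or unauthenticated keys), in which case a
 * negative reply is sent. */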
2403 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2404 {
2405 struct hci_ev_link_key_req *ev = (void *) skb->data;
2406 struct hci_cp_link_key_reply cp;
2407 struct hci_conn *conn;
2408 struct link_key *key;
2409
2410 BT_DBG("%s", hdev->name);
2411
2412 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2413 return;
2414
2415 hci_dev_lock(hdev);
2416
2417 key = hci_find_link_key(hdev, &ev->bdaddr);
2418 if (!key) {
2419 BT_DBG("%s link key not found for %s", hdev->name,
2420 batostr(&ev->bdaddr));
2421 goto not_found;
2422 }
2423
2424 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2425 batostr(&ev->bdaddr));
2426
2427 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2428 key->type == HCI_LK_DEBUG_COMBINATION) {
2429 BT_DBG("%s ignoring debug key", hdev->name);
2430 goto not_found;
2431 }
2432
2433 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2434 if (conn) {
2435 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2436 conn->auth_type != 0xff &&
2437 (conn->auth_type & 0x01)) {
2438 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2439 goto not_found;
2440 }
2441
2442 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2443 conn->pending_sec_level == BT_SECURITY_HIGH) {
2444 BT_DBG("%s ignoring key unauthenticated for high security",
2445 hdev->name);
2446 goto not_found;
2447 }
2448
2449 conn->key_type = key->type;
2450 conn->pin_length = key->pin_len;
2451 }
2452
2453 bacpy(&cp.bdaddr, &ev->bdaddr);
2454 memcpy(cp.link_key, key->val, 16);
2455
2456 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2457
2458 hci_dev_unlock(hdev);
2459
2460 return;
2461
2462 not_found:
2463 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2464 hci_dev_unlock(hdev);
2465 }
2466
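/* Link Key Notification event: record the key type and PIN length on
 * the connection and store the new link key if link key handling is
 * enabled. */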
2467 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2468 {
2469 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2470 struct hci_conn *conn;
2471 u8 pin_len = 0;
2472
2473 BT_DBG("%s", hdev->name);
2474
2475 hci_dev_lock(hdev);
2476
2477 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2478 if (conn) {
2479 hci_conn_hold(conn);
2480 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2481 pin_len = conn->pin_length;
2482
2483 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2484 conn->key_type = ev->key_type;
2485
2486 hci_conn_put(conn);
2487 }
2488
2489 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2490 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2491 ev->key_type, pin_len);
2492
2493 hci_dev_unlock(hdev);
2494 }
2495
2496 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2497 {
2498 struct hci_ev_clock_offset *ev = (void *) skb->data;
2499 struct hci_conn *conn;
2500
2501 BT_DBG("%s status %d", hdev->name, ev->status);
2502
2503 hci_dev_lock(hdev);
2504
2505 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2506 if (conn && !ev->status) {
2507 struct inquiry_entry *ie;
2508
2509 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2510 if (ie) {
2511 ie->data.clock_offset = ev->clock_offset;
2512 ie->timestamp = jiffies;
2513 }
2514 }
2515
2516 hci_dev_unlock(hdev);
2517 }
2518
2519 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2520 {
2521 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2522 struct hci_conn *conn;
2523
2524 BT_DBG("%s status %d", hdev->name, ev->status);
2525
2526 hci_dev_lock(hdev);
2527
2528 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2529 if (conn && !ev->status)
2530 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2531
2532 hci_dev_unlock(hdev);
2533 }
2534
2535 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2536 {
2537 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2538 struct inquiry_entry *ie;
2539
2540 BT_DBG("%s", hdev->name);
2541
2542 hci_dev_lock(hdev);
2543
2544 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2545 if (ie) {
2546 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2547 ie->timestamp = jiffies;
2548 }
2549
2550 hci_dev_unlock(hdev);
2551 }
2552
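/* Inquiry Result with RSSI event: responses arrive in two formats
 * (with or without the page scan mode byte), distinguished by the
 * per-response size. Each response updates the inquiry cache and is
 * reported to the management interface along with whether the remote
 * name is already known. */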
2553 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2554 {
2555 struct inquiry_data data;
2556 int num_rsp = *((__u8 *) skb->data);
2557 bool name_known;
2558
2559 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2560
2561 if (!num_rsp)
2562 return;
2563
2564 hci_dev_lock(hdev);
2565
2566 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2567 struct inquiry_info_with_rssi_and_pscan_mode *info;
2568 info = (void *) (skb->data + 1);
2569
2570 for (; num_rsp; num_rsp--, info++) {
2571 bacpy(&data.bdaddr, &info->bdaddr);
2572 data.pscan_rep_mode = info->pscan_rep_mode;
2573 data.pscan_period_mode = info->pscan_period_mode;
2574 data.pscan_mode = info->pscan_mode;
2575 memcpy(data.dev_class, info->dev_class, 3);
2576 data.clock_offset = info->clock_offset;
2577 data.rssi = info->rssi;
2578 data.ssp_mode = 0x00;
2579
2580 name_known = hci_inquiry_cache_update(hdev, &data,
2581 false);
2582 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2583 info->dev_class, info->rssi,
2584 !name_known, NULL);
2585 }
2586 } else {
2587 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2588
2589 for (; num_rsp; num_rsp--, info++) {
2590 bacpy(&data.bdaddr, &info->bdaddr);
2591 data.pscan_rep_mode = info->pscan_rep_mode;
2592 data.pscan_period_mode = info->pscan_period_mode;
2593 data.pscan_mode = 0x00;
2594 memcpy(data.dev_class, info->dev_class, 3);
2595 data.clock_offset = info->clock_offset;
2596 data.rssi = info->rssi;
2597 data.ssp_mode = 0x00;
2598 name_known = hci_inquiry_cache_update(hdev, &data,
2599 false);
2600 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2601 info->dev_class, info->rssi,
2602 !name_known, NULL);
2603 }
2604 }
2605
2606 hci_dev_unlock(hdev);
2607 }
2608
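/* Remote Extended Features event: page 0x01 carries the remote host's
 * SSP support bit. While the connection is still in the config state,
 * continue setup with a remote name request and, if no outgoing
 * authentication is needed, mark the connection established. */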
2609 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2610 {
2611 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2612 struct hci_conn *conn;
2613
2614 BT_DBG("%s", hdev->name);
2615
2616 hci_dev_lock(hdev);
2617
2618 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2619 if (!conn)
2620 goto unlock;
2621
2622 if (!ev->status && ev->page == 0x01) {
2623 struct inquiry_entry *ie;
2624
2625 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2626 if (ie)
2627 ie->data.ssp_mode = (ev->features[0] & 0x01);
2628
2629 conn->ssp_mode = (ev->features[0] & 0x01);
2630 }
2631
2632 if (conn->state != BT_CONFIG)
2633 goto unlock;
2634
2635 if (!ev->status) {
2636 struct hci_cp_remote_name_req cp;
2637 memset(&cp, 0, sizeof(cp));
2638 bacpy(&cp.bdaddr, &conn->dst);
2639 cp.pscan_rep_mode = 0x02;
2640 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2641 }
2642
2643 if (!hci_outgoing_auth_needed(hdev, conn)) {
2644 conn->state = BT_CONNECTED;
2645 hci_proto_connect_cfm(conn, ev->status);
2646 hci_conn_put(conn);
2647 }
2648
2649 unlock:
2650 hci_dev_unlock(hdev);
2651 }
2652
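/* Synchronous Connection Complete event: set up the SCO/eSCO link on
 * success; for a few specific failure codes retry the setup once with
 * adjusted packet types before giving up. */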
2653 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2654 {
2655 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2656 struct hci_conn *conn;
2657
2658 BT_DBG("%s status %d", hdev->name, ev->status);
2659
2660 hci_dev_lock(hdev);
2661
2662 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2663 if (!conn) {
2664 if (ev->link_type == ESCO_LINK)
2665 goto unlock;
2666
2667 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2668 if (!conn)
2669 goto unlock;
2670
2671 conn->type = SCO_LINK;
2672 }
2673
2674 switch (ev->status) {
2675 case 0x00:
2676 conn->handle = __le16_to_cpu(ev->handle);
2677 conn->state = BT_CONNECTED;
2678
2679 hci_conn_hold_device(conn);
2680 hci_conn_add_sysfs(conn);
2681 break;
2682
2683 case 0x11: /* Unsupported Feature or Parameter Value */
2684 case 0x1c: /* SCO interval rejected */
2685 case 0x1a: /* Unsupported Remote Feature */
2686 case 0x1f: /* Unspecified error */
2687 if (conn->out && conn->attempt < 2) {
2688 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2689 (hdev->esco_type & EDR_ESCO_MASK);
2690 hci_setup_sync(conn, conn->link->handle);
2691 goto unlock;
2692 }
2693 /* fall through */
2694
2695 default:
2696 conn->state = BT_CLOSED;
2697 break;
2698 }
2699
2700 hci_proto_connect_cfm(conn, ev->status);
2701 if (ev->status)
2702 hci_conn_del(conn);
2703
2704 unlock:
2705 hci_dev_unlock(hdev);
2706 }
2707
2708 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2709 {
2710 BT_DBG("%s", hdev->name);
2711 }
2712
2713 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2714 {
2715 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2716
2717 BT_DBG("%s status %d", hdev->name, ev->status);
2718 }
2719
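/* Walk the EIR data, a sequence of length-prefixed (length, type, data)
 * fields, and report whether a complete local name field is present. */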
2720 static inline bool eir_has_complete_name(u8 *data, size_t data_len)
2721 {
2722 u8 field_len;
2723 size_t parsed;
2724
2725 for (parsed = 0; parsed < data_len - 1; ) {
2726 field_len = data[0];
2727
2728 if (field_len == 0)
2729 break;
2730
2731 parsed += field_len + 1;
2732
2733 if (parsed > data_len)
2734 break;
2735
2736 if (data[1] == EIR_NAME_COMPLETE)
2737 return true;
2738
2739 data += field_len + 1;
2740 }
2741
2742 return false;
2743 }
2744
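/* Extended Inquiry Result event: like the RSSI variant, but each
 * response carries EIR data, so the name counts as known when the EIR
 * already contains the complete local name. */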
2745 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2746 {
2747 struct inquiry_data data;
2748 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2749 int num_rsp = *((__u8 *) skb->data);
2750
2751 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2752
2753 if (!num_rsp)
2754 return;
2755
2756 hci_dev_lock(hdev);
2757
2758 for (; num_rsp; num_rsp--, info++) {
2759 bool name_known;
2760
2761 bacpy(&data.bdaddr, &info->bdaddr);
2762 data.pscan_rep_mode = info->pscan_rep_mode;
2763 data.pscan_period_mode = info->pscan_period_mode;
2764 data.pscan_mode = 0x00;
2765 memcpy(data.dev_class, info->dev_class, 3);
2766 data.clock_offset = info->clock_offset;
2767 data.rssi = info->rssi;
2768 data.ssp_mode = 0x01;
2769
2770 if (test_bit(HCI_MGMT, &hdev->flags))
2771 name_known = eir_has_complete_name(info->data,
2772 sizeof(info->data));
2773 else
2774 name_known = true;
2775
2776 name_known = hci_inquiry_cache_update(hdev, &data, name_known);
2777 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2778 info->dev_class, info->rssi,
2779 !name_known, info->data);
2780 }
2781
2782 hci_dev_unlock(hdev);
2783 }
2784
2785 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2786 {
2787 /* If the remote side requests dedicated bonding, follow that lead */
2788 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2789 /* If both remote and local IO capabilities allow MITM
2790 * protection then require it, otherwise don't */
2791 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2792 return 0x02;
2793 else
2794 return 0x03;
2795 }
2796
2797 /* If the remote side requests no-bonding, follow that lead */
2798 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2799 return conn->remote_auth | (conn->auth_type & 0x01);
2800
2801 return conn->auth_type;
2802 }
2803
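/* IO Capability Request event (Secure Simple Pairing): reply with our
 * IO capability, derived authentication requirements and OOB data
 * presence, or reject the pairing when the adapter is not pairable and
 * the remote side requests bonding. */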
2804 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2805 {
2806 struct hci_ev_io_capa_request *ev = (void *) skb->data;
2807 struct hci_conn *conn;
2808
2809 BT_DBG("%s", hdev->name);
2810
2811 hci_dev_lock(hdev);
2812
2813 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2814 if (!conn)
2815 goto unlock;
2816
2817 hci_conn_hold(conn);
2818
2819 if (!test_bit(HCI_MGMT, &hdev->flags))
2820 goto unlock;
2821
2822 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2823 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2824 struct hci_cp_io_capability_reply cp;
2825
2826 bacpy(&cp.bdaddr, &ev->bdaddr);
2827 cp.capability = conn->io_capability;
2828 conn->auth_type = hci_get_auth_req(conn);
2829 cp.authentication = conn->auth_type;
2830
2831 if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2832 hci_find_remote_oob_data(hdev, &conn->dst))
2833 cp.oob_data = 0x01;
2834 else
2835 cp.oob_data = 0x00;
2836
2837 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2838 sizeof(cp), &cp);
2839 } else {
2840 struct hci_cp_io_capability_neg_reply cp;
2841
2842 bacpy(&cp.bdaddr, &ev->bdaddr);
2843 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
2844
2845 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2846 sizeof(cp), &cp);
2847 }
2848
2849 unlock:
2850 hci_dev_unlock(hdev);
2851 }
2852
2853 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2854 {
2855 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2856 struct hci_conn *conn;
2857
2858 BT_DBG("%s", hdev->name);
2859
2860 hci_dev_lock(hdev);
2861
2862 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2863 if (!conn)
2864 goto unlock;
2865
2866 conn->remote_cap = ev->capability;
2867 conn->remote_oob = ev->oob_data;
2868 conn->remote_auth = ev->authentication;
2869
2870 unlock:
2871 hci_dev_unlock(hdev);
2872 }
2873
2874 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2875 struct sk_buff *skb)
2876 {
2877 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2878 int loc_mitm, rem_mitm, confirm_hint = 0;
2879 struct hci_conn *conn;
2880
2881 BT_DBG("%s", hdev->name);
2882
2883 hci_dev_lock(hdev);
2884
2885 if (!test_bit(HCI_MGMT, &hdev->flags))
2886 goto unlock;
2887
2888 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2889 if (!conn)
2890 goto unlock;
2891
2892 loc_mitm = (conn->auth_type & 0x01);
2893 rem_mitm = (conn->remote_auth & 0x01);
2894
2895 /* If we require MITM but the remote device can't provide that
2896 * (it has NoInputNoOutput) then reject the confirmation
2897 * request. The only exception is when we're dedicated bonding
2898 * initiators (connect_cfm_cb set) since then we always have the MITM
2899 * bit set. */
2900 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
2901 BT_DBG("Rejecting request: remote device can't provide MITM");
2902 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
2903 sizeof(ev->bdaddr), &ev->bdaddr);
2904 goto unlock;
2905 }
2906
2907 /* If neither side requires MITM protection, auto-accept */
2908 if ((!loc_mitm || conn->remote_cap == 0x03) &&
2909 (!rem_mitm || conn->io_capability == 0x03)) {
2910
2911 /* If we're not the initiator, request authorization to
2912 * proceed from user space (mgmt_user_confirm with
2913 * confirm_hint set to 1). */
2914 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
2915 BT_DBG("Confirming auto-accept as acceptor");
2916 confirm_hint = 1;
2917 goto confirm;
2918 }
2919
2920 BT_DBG("Auto-accept of user confirmation with %ums delay",
2921 hdev->auto_accept_delay);
2922
2923 if (hdev->auto_accept_delay > 0) {
2924 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
2925 mod_timer(&conn->auto_accept_timer, jiffies + delay);
2926 goto unlock;
2927 }
2928
2929 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
2930 sizeof(ev->bdaddr), &ev->bdaddr);
2931 goto unlock;
2932 }
2933
2934 confirm:
2935 mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
2936 confirm_hint);
2937
2938 unlock:
2939 hci_dev_unlock(hdev);
2940 }
2941
2942 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
2943 struct sk_buff *skb)
2944 {
2945 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
2946
2947 BT_DBG("%s", hdev->name);
2948
2949 hci_dev_lock(hdev);
2950
2951 if (test_bit(HCI_MGMT, &hdev->flags))
2952 mgmt_user_passkey_request(hdev, &ev->bdaddr);
2953
2954 hci_dev_unlock(hdev);
2955 }
2956
2957 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2958 {
2959 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
2960 struct hci_conn *conn;
2961
2962 BT_DBG("%s", hdev->name);
2963
2964 hci_dev_lock(hdev);
2965
2966 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2967 if (!conn)
2968 goto unlock;
2969
2970 /* To avoid duplicate auth_failed events to user space we check
2971 * the HCI_CONN_AUTH_PEND flag, which will be set if we
2972 * initiated the authentication. A traditional auth_complete
2973 * event is always produced for the initiator and is also mapped
2974 * to the mgmt_auth_failed event */
2975 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2976 mgmt_auth_failed(hdev, &conn->dst, ev->status);
2977
2978 hci_conn_put(conn);
2979
2980 unlock:
2981 hci_dev_unlock(hdev);
2982 }
2983
2984 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2985 {
2986 struct hci_ev_remote_host_features *ev = (void *) skb->data;
2987 struct inquiry_entry *ie;
2988
2989 BT_DBG("%s", hdev->name);
2990
2991 hci_dev_lock(hdev);
2992
2993 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2994 if (ie)
2995 ie->data.ssp_mode = (ev->features[0] & 0x01);
2996
2997 hci_dev_unlock(hdev);
2998 }
2999
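/* Remote OOB Data Request event: reply with the stored hash and
 * randomizer for the peer if available, otherwise send a negative
 * reply. */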
3000 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3001 struct sk_buff *skb)
3002 {
3003 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3004 struct oob_data *data;
3005
3006 BT_DBG("%s", hdev->name);
3007
3008 hci_dev_lock(hdev);
3009
3010 if (!test_bit(HCI_MGMT, &hdev->flags))
3011 goto unlock;
3012
3013 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3014 if (data) {
3015 struct hci_cp_remote_oob_data_reply cp;
3016
3017 bacpy(&cp.bdaddr, &ev->bdaddr);
3018 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3019 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3020
3021 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3022 &cp);
3023 } else {
3024 struct hci_cp_remote_oob_data_neg_reply cp;
3025
3026 bacpy(&cp.bdaddr, &ev->bdaddr);
3027 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3028 &cp);
3029 }
3030
3031 unlock:
3032 hci_dev_unlock(hdev);
3033 }
3034
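/* LE Connection Complete event: create a connection object if one does
 * not already exist, then report success or failure to the management
 * interface and the upper protocols. */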
3035 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3036 {
3037 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3038 struct hci_conn *conn;
3039
3040 BT_DBG("%s status %d", hdev->name, ev->status);
3041
3042 hci_dev_lock(hdev);
3043
3044 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3045 if (!conn) {
3046 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3047 if (!conn) {
3048 BT_ERR("No memory for new connection");
3049 hci_dev_unlock(hdev);
3050 return;
3051 }
3052
3053 conn->dst_type = ev->bdaddr_type;
3054 }
3055
3056 if (ev->status) {
3057 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3058 conn->dst_type, ev->status);
3059 hci_proto_connect_cfm(conn, ev->status);
3060 conn->state = BT_CLOSED;
3061 hci_conn_del(conn);
3062 goto unlock;
3063 }
3064
3065 mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);
3066
3067 conn->sec_level = BT_SECURITY_LOW;
3068 conn->handle = __le16_to_cpu(ev->handle);
3069 conn->state = BT_CONNECTED;
3070
3071 hci_conn_hold_device(conn);
3072 hci_conn_add_sysfs(conn);
3073
3074 hci_proto_connect_cfm(conn, ev->status);
3075
3076 unlock:
3077 hci_dev_unlock(hdev);
3078 }
3079
3080 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3081 struct sk_buff *skb)
3082 {
3083 u8 num_reports = skb->data[0];
3084 void *ptr = &skb->data[1];
3085
3086 hci_dev_lock(hdev);
3087
3088 while (num_reports--) {
3089 struct hci_ev_le_advertising_info *ev = ptr;
3090
3091 hci_add_adv_entry(hdev, ev);
3092
3093 ptr += sizeof(*ev) + ev->length + 1;
3094 }
3095
3096 hci_dev_unlock(hdev);
3097 }
3098
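/* LE Long Term Key Request event: look up the LTK matching the EDIV
 * and Rand values supplied by the controller and reply with it, or
 * send a negative reply when no matching key is stored. */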
3099 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3100 struct sk_buff *skb)
3101 {
3102 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3103 struct hci_cp_le_ltk_reply cp;
3104 struct hci_cp_le_ltk_neg_reply neg;
3105 struct hci_conn *conn;
3106 struct link_key *ltk;
3107
3108 BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3109
3110 hci_dev_lock(hdev);
3111
3112 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3113 if (conn == NULL)
3114 goto not_found;
3115
3116 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3117 if (ltk == NULL)
3118 goto not_found;
3119
3120 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3121 cp.handle = cpu_to_le16(conn->handle);
3122 conn->pin_length = ltk->pin_len;
3123
3124 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3125
3126 hci_dev_unlock(hdev);
3127
3128 return;
3129
3130 not_found:
3131 neg.handle = ev->handle;
3132 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3133 hci_dev_unlock(hdev);
3134 }
3135
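/* LE Meta event: strip the meta header and dispatch on the subevent
 * code. */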
3136 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3137 {
3138 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3139
3140 skb_pull(skb, sizeof(*le_ev));
3141
3142 switch (le_ev->subevent) {
3143 case HCI_EV_LE_CONN_COMPLETE:
3144 hci_le_conn_complete_evt(hdev, skb);
3145 break;
3146
3147 case HCI_EV_LE_ADVERTISING_REPORT:
3148 hci_le_adv_report_evt(hdev, skb);
3149 break;
3150
3151 case HCI_EV_LE_LTK_REQ:
3152 hci_le_ltk_request_evt(hdev, skb);
3153 break;
3154
3155 default:
3156 break;
3157 }
3158 }
3159
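/* Main HCI event demultiplexer: strips the event header and dispatches
 * each received event packet to its handler. */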
3160 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3161 {
3162 struct hci_event_hdr *hdr = (void *) skb->data;
3163 __u8 event = hdr->evt;
3164
3165 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3166
3167 switch (event) {
3168 case HCI_EV_INQUIRY_COMPLETE:
3169 hci_inquiry_complete_evt(hdev, skb);
3170 break;
3171
3172 case HCI_EV_INQUIRY_RESULT:
3173 hci_inquiry_result_evt(hdev, skb);
3174 break;
3175
3176 case HCI_EV_CONN_COMPLETE:
3177 hci_conn_complete_evt(hdev, skb);
3178 break;
3179
3180 case HCI_EV_CONN_REQUEST:
3181 hci_conn_request_evt(hdev, skb);
3182 break;
3183
3184 case HCI_EV_DISCONN_COMPLETE:
3185 hci_disconn_complete_evt(hdev, skb);
3186 break;
3187
3188 case HCI_EV_AUTH_COMPLETE:
3189 hci_auth_complete_evt(hdev, skb);
3190 break;
3191
3192 case HCI_EV_REMOTE_NAME:
3193 hci_remote_name_evt(hdev, skb);
3194 break;
3195
3196 case HCI_EV_ENCRYPT_CHANGE:
3197 hci_encrypt_change_evt(hdev, skb);
3198 break;
3199
3200 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3201 hci_change_link_key_complete_evt(hdev, skb);
3202 break;
3203
3204 case HCI_EV_REMOTE_FEATURES:
3205 hci_remote_features_evt(hdev, skb);
3206 break;
3207
3208 case HCI_EV_REMOTE_VERSION:
3209 hci_remote_version_evt(hdev, skb);
3210 break;
3211
3212 case HCI_EV_QOS_SETUP_COMPLETE:
3213 hci_qos_setup_complete_evt(hdev, skb);
3214 break;
3215
3216 case HCI_EV_CMD_COMPLETE:
3217 hci_cmd_complete_evt(hdev, skb);
3218 break;
3219
3220 case HCI_EV_CMD_STATUS:
3221 hci_cmd_status_evt(hdev, skb);
3222 break;
3223
3224 case HCI_EV_ROLE_CHANGE:
3225 hci_role_change_evt(hdev, skb);
3226 break;
3227
3228 case HCI_EV_NUM_COMP_PKTS:
3229 hci_num_comp_pkts_evt(hdev, skb);
3230 break;
3231
3232 case HCI_EV_MODE_CHANGE:
3233 hci_mode_change_evt(hdev, skb);
3234 break;
3235
3236 case HCI_EV_PIN_CODE_REQ:
3237 hci_pin_code_request_evt(hdev, skb);
3238 break;
3239
3240 case HCI_EV_LINK_KEY_REQ:
3241 hci_link_key_request_evt(hdev, skb);
3242 break;
3243
3244 case HCI_EV_LINK_KEY_NOTIFY:
3245 hci_link_key_notify_evt(hdev, skb);
3246 break;
3247
3248 case HCI_EV_CLOCK_OFFSET:
3249 hci_clock_offset_evt(hdev, skb);
3250 break;
3251
3252 case HCI_EV_PKT_TYPE_CHANGE:
3253 hci_pkt_type_change_evt(hdev, skb);
3254 break;
3255
3256 case HCI_EV_PSCAN_REP_MODE:
3257 hci_pscan_rep_mode_evt(hdev, skb);
3258 break;
3259
3260 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3261 hci_inquiry_result_with_rssi_evt(hdev, skb);
3262 break;
3263
3264 case HCI_EV_REMOTE_EXT_FEATURES:
3265 hci_remote_ext_features_evt(hdev, skb);
3266 break;
3267
3268 case HCI_EV_SYNC_CONN_COMPLETE:
3269 hci_sync_conn_complete_evt(hdev, skb);
3270 break;
3271
3272 case HCI_EV_SYNC_CONN_CHANGED:
3273 hci_sync_conn_changed_evt(hdev, skb);
3274 break;
3275
3276 case HCI_EV_SNIFF_SUBRATE:
3277 hci_sniff_subrate_evt(hdev, skb);
3278 break;
3279
3280 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3281 hci_extended_inquiry_result_evt(hdev, skb);
3282 break;
3283
3284 case HCI_EV_IO_CAPA_REQUEST:
3285 hci_io_capa_request_evt(hdev, skb);
3286 break;
3287
3288 case HCI_EV_IO_CAPA_REPLY:
3289 hci_io_capa_reply_evt(hdev, skb);
3290 break;
3291
3292 case HCI_EV_USER_CONFIRM_REQUEST:
3293 hci_user_confirm_request_evt(hdev, skb);
3294 break;
3295
3296 case HCI_EV_USER_PASSKEY_REQUEST:
3297 hci_user_passkey_request_evt(hdev, skb);
3298 break;
3299
3300 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3301 hci_simple_pair_complete_evt(hdev, skb);
3302 break;
3303
3304 case HCI_EV_REMOTE_HOST_FEATURES:
3305 hci_remote_host_features_evt(hdev, skb);
3306 break;
3307
3308 case HCI_EV_LE_META:
3309 hci_le_meta_evt(hdev, skb);
3310 break;
3311
3312 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3313 hci_remote_oob_data_request_evt(hdev, skb);
3314 break;
3315
3316 default:
3317 BT_DBG("%s event 0x%x", hdev->name, event);
3318 break;
3319 }
3320
3321 kfree_skb(skb);
3322 hdev->stat.evt_rx++;
3323 }
3324
3325 /* Generate internal stack event */
3326 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3327 {
3328 struct hci_event_hdr *hdr;
3329 struct hci_ev_stack_internal *ev;
3330 struct sk_buff *skb;
3331
3332 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3333 if (!skb)
3334 return;
3335
3336 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3337 hdr->evt = HCI_EV_STACK_INTERNAL;
3338 hdr->plen = sizeof(*ev) + dlen;
3339
3340 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
3341 ev->type = type;
3342 memcpy(ev->data, data, dlen);
3343
3344 bt_cb(skb)->incoming = 1;
3345 __net_timestamp(skb);
3346
3347 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3348 skb->dev = (void *) hdev;
3349 hci_send_to_sock(hdev, skb, NULL);
3350 kfree_skb(skb);
3351 }
3352
3353 module_param(enable_le, bool, 0644);
3354 MODULE_PARM_DESC(enable_le, "Enable LE support");