1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 static bool enable_le;
49
50 /* Handle HCI Event packets */
51
52 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53 {
54 __u8 status = *((__u8 *) skb->data);
55
56 BT_DBG("%s status 0x%x", hdev->name, status);
57
58 if (status) {
59 hci_dev_lock(hdev);
60 mgmt_stop_discovery_failed(hdev, status);
61 hci_dev_unlock(hdev);
62 return;
63 }
64
65 clear_bit(HCI_INQUIRY, &hdev->flags);
66
67 hci_dev_lock(hdev);
68 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
69 hci_dev_unlock(hdev);
70
71 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
72
73 hci_conn_check_pending(hdev);
74 }
75
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%x", hdev->name, status);
81
82 if (status)
83 return;
84
85 hci_conn_check_pending(hdev);
86 }
87
88 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
89 {
90 BT_DBG("%s", hdev->name);
91 }
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157 }
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
172 {
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%x", hdev->name, status);
177
178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 if (!sent)
180 return;
181
182 if (!status)
183 hdev->link_policy = get_unaligned_le16(sent);
184
185 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 hci_req_complete(hdev, HCI_OP_RESET, status);
197
198 /* Reset all flags, except persistent ones */
199 hdev->dev_flags &= BIT(HCI_MGMT) | BIT(HCI_SETUP) | BIT(HCI_AUTO_OFF);
200 }
201
202 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
203 {
204 __u8 status = *((__u8 *) skb->data);
205 void *sent;
206
207 BT_DBG("%s status 0x%x", hdev->name, status);
208
209 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
210 if (!sent)
211 return;
212
213 hci_dev_lock(hdev);
214
215 if (test_bit(HCI_MGMT, &hdev->dev_flags))
216 mgmt_set_local_name_complete(hdev, sent, status);
217
218 if (status == 0)
219 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
220
221 hci_dev_unlock(hdev);
222 }
223
224 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
225 {
226 struct hci_rp_read_local_name *rp = (void *) skb->data;
227
228 BT_DBG("%s status 0x%x", hdev->name, rp->status);
229
230 if (rp->status)
231 return;
232
233 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
234 }
235
236 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 __u8 status = *((__u8 *) skb->data);
239 void *sent;
240
241 BT_DBG("%s status 0x%x", hdev->name, status);
242
243 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
244 if (!sent)
245 return;
246
247 if (!status) {
248 __u8 param = *((__u8 *) sent);
249
250 if (param == AUTH_ENABLED)
251 set_bit(HCI_AUTH, &hdev->flags);
252 else
253 clear_bit(HCI_AUTH, &hdev->flags);
254 }
255
256 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
257 }
258
259 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
260 {
261 __u8 status = *((__u8 *) skb->data);
262 void *sent;
263
264 BT_DBG("%s status 0x%x", hdev->name, status);
265
266 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
267 if (!sent)
268 return;
269
270 if (!status) {
271 __u8 param = *((__u8 *) sent);
272
273 if (param)
274 set_bit(HCI_ENCRYPT, &hdev->flags);
275 else
276 clear_bit(HCI_ENCRYPT, &hdev->flags);
277 }
278
279 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
280 }
281
282 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
283 {
284 __u8 param, status = *((__u8 *) skb->data);
285 int old_pscan, old_iscan;
286 void *sent;
287
288 BT_DBG("%s status 0x%x", hdev->name, status);
289
290 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
291 if (!sent)
292 return;
293
294 param = *((__u8 *) sent);
295
296 hci_dev_lock(hdev);
297
298 if (status != 0) {
299 mgmt_write_scan_failed(hdev, param, status);
300 hdev->discov_timeout = 0;
301 goto done;
302 }
303
304 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
305 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
306
307 if (param & SCAN_INQUIRY) {
308 set_bit(HCI_ISCAN, &hdev->flags);
309 if (!old_iscan)
310 mgmt_discoverable(hdev, 1);
311 if (hdev->discov_timeout > 0) {
312 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
313 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
314 to);
315 }
316 } else if (old_iscan)
317 mgmt_discoverable(hdev, 0);
318
319 if (param & SCAN_PAGE) {
320 set_bit(HCI_PSCAN, &hdev->flags);
321 if (!old_pscan)
322 mgmt_connectable(hdev, 1);
323 } else if (old_pscan)
324 mgmt_connectable(hdev, 0);
325
326 done:
327 hci_dev_unlock(hdev);
328 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
329 }
330
331 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
332 {
333 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
334
335 BT_DBG("%s status 0x%x", hdev->name, rp->status);
336
337 if (rp->status)
338 return;
339
340 memcpy(hdev->dev_class, rp->dev_class, 3);
341
342 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
343 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
344 }
345
346 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
347 {
348 __u8 status = *((__u8 *) skb->data);
349 void *sent;
350
351 BT_DBG("%s status 0x%x", hdev->name, status);
352
353 if (status)
354 return;
355
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 if (!sent)
358 return;
359
360 memcpy(hdev->dev_class, sent, 3);
361 }
362
363 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
364 {
365 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
366 __u16 setting;
367
368 BT_DBG("%s status 0x%x", hdev->name, rp->status);
369
370 if (rp->status)
371 return;
372
373 setting = __le16_to_cpu(rp->voice_setting);
374
375 if (hdev->voice_setting == setting)
376 return;
377
378 hdev->voice_setting = setting;
379
380 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
381
382 if (hdev->notify)
383 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
384 }
385
386 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
387 {
388 __u8 status = *((__u8 *) skb->data);
389 __u16 setting;
390 void *sent;
391
392 BT_DBG("%s status 0x%x", hdev->name, status);
393
394 if (status)
395 return;
396
397 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
398 if (!sent)
399 return;
400
401 setting = get_unaligned_le16(sent);
402
403 if (hdev->voice_setting == setting)
404 return;
405
406 hdev->voice_setting = setting;
407
408 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
409
410 if (hdev->notify)
411 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
412 }
413
414 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
415 {
416 __u8 status = *((__u8 *) skb->data);
417
418 BT_DBG("%s status 0x%x", hdev->name, status);
419
420 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
421 }
422
423 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
424 {
425 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
426
427 BT_DBG("%s status 0x%x", hdev->name, rp->status);
428
429 if (rp->status)
430 return;
431
432 if (rp->mode)
433 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
434 else
435 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
436 }
437
438 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
439 {
440 __u8 status = *((__u8 *) skb->data);
441 void *sent;
442
443 BT_DBG("%s status 0x%x", hdev->name, status);
444
445 if (status)
446 return;
447
448 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
449 if (!sent)
450 return;
451
452 if (*((u8 *) sent))
453 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
454 else
455 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
456 }
457
458 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
459 {
460 if (hdev->features[6] & LMP_EXT_INQ)
461 return 2;
462
463 if (hdev->features[3] & LMP_RSSI_INQ)
464 return 1;
465
466 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
467 hdev->lmp_subver == 0x0757)
468 return 1;
469
470 if (hdev->manufacturer == 15) {
471 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
472 return 1;
473 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
474 return 1;
475 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
476 return 1;
477 }
478
479 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
480 hdev->lmp_subver == 0x1805)
481 return 1;
482
483 return 0;
484 }
485
486 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
487 {
488 u8 mode;
489
490 mode = hci_get_inquiry_mode(hdev);
491
492 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
493 }
494
495 static void hci_setup_event_mask(struct hci_dev *hdev)
496 {
497 /* The second byte is 0xff instead of 0x9f (two reserved bits
498 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
499 * command otherwise */
500 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
501
502 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
503 * any event mask for pre 1.2 devices */
504 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
505 return;
506
507 events[4] |= 0x01; /* Flow Specification Complete */
508 events[4] |= 0x02; /* Inquiry Result with RSSI */
509 events[4] |= 0x04; /* Read Remote Extended Features Complete */
510 events[5] |= 0x08; /* Synchronous Connection Complete */
511 events[5] |= 0x10; /* Synchronous Connection Changed */
512
513 if (hdev->features[3] & LMP_RSSI_INQ)
514 events[4] |= 0x04; /* Inquiry Result with RSSI */
515
516 if (hdev->features[5] & LMP_SNIFF_SUBR)
517 events[5] |= 0x20; /* Sniff Subrating */
518
519 if (hdev->features[5] & LMP_PAUSE_ENC)
520 events[5] |= 0x80; /* Encryption Key Refresh Complete */
521
522 if (hdev->features[6] & LMP_EXT_INQ)
523 events[5] |= 0x40; /* Extended Inquiry Result */
524
525 if (hdev->features[6] & LMP_NO_FLUSH)
526 events[7] |= 0x01; /* Enhanced Flush Complete */
527
528 if (hdev->features[7] & LMP_LSTO)
529 events[6] |= 0x80; /* Link Supervision Timeout Changed */
530
531 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
532 events[6] |= 0x01; /* IO Capability Request */
533 events[6] |= 0x02; /* IO Capability Response */
534 events[6] |= 0x04; /* User Confirmation Request */
535 events[6] |= 0x08; /* User Passkey Request */
536 events[6] |= 0x10; /* Remote OOB Data Request */
537 events[6] |= 0x20; /* Simple Pairing Complete */
538 events[7] |= 0x04; /* User Passkey Notification */
539 events[7] |= 0x08; /* Keypress Notification */
540 events[7] |= 0x10; /* Remote Host Supported
541 * Features Notification */
542 }
543
544 if (hdev->features[4] & LMP_LE)
545 events[7] |= 0x20; /* LE Meta-Event */
546
547 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
548 }
549
550 static void hci_set_le_support(struct hci_dev *hdev)
551 {
552 struct hci_cp_write_le_host_supported cp;
553
554 memset(&cp, 0, sizeof(cp));
555
556 if (enable_le) {
557 cp.le = 1;
558 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
559 }
560
561 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
562 }
563
564 static void hci_setup(struct hci_dev *hdev)
565 {
566 if (hdev->dev_type != HCI_BREDR)
567 return;
568
569 hci_setup_event_mask(hdev);
570
571 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
572 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
573
574 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
575 u8 mode = 0x01;
576 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
577 }
578
579 if (hdev->features[3] & LMP_RSSI_INQ)
580 hci_setup_inquiry_mode(hdev);
581
582 if (hdev->features[7] & LMP_INQ_TX_PWR)
583 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
584
585 if (hdev->features[7] & LMP_EXTFEATURES) {
586 struct hci_cp_read_local_ext_features cp;
587
588 cp.page = 0x01;
589 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
590 sizeof(cp), &cp);
591 }
592
593 if (hdev->features[4] & LMP_LE)
594 hci_set_le_support(hdev);
595 }
596
597 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
598 {
599 struct hci_rp_read_local_version *rp = (void *) skb->data;
600
601 BT_DBG("%s status 0x%x", hdev->name, rp->status);
602
603 if (rp->status)
604 return;
605
606 hdev->hci_ver = rp->hci_ver;
607 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
608 hdev->lmp_ver = rp->lmp_ver;
609 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
610 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
611
612 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
613 hdev->manufacturer,
614 hdev->hci_ver, hdev->hci_rev);
615
616 if (test_bit(HCI_INIT, &hdev->flags))
617 hci_setup(hdev);
618 }
619
620 static void hci_setup_link_policy(struct hci_dev *hdev)
621 {
622 u16 link_policy = 0;
623
624 if (hdev->features[0] & LMP_RSWITCH)
625 link_policy |= HCI_LP_RSWITCH;
626 if (hdev->features[0] & LMP_HOLD)
627 link_policy |= HCI_LP_HOLD;
628 if (hdev->features[0] & LMP_SNIFF)
629 link_policy |= HCI_LP_SNIFF;
630 if (hdev->features[1] & LMP_PARK)
631 link_policy |= HCI_LP_PARK;
632
633 link_policy = cpu_to_le16(link_policy);
634 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
635 sizeof(link_policy), &link_policy);
636 }
637
638 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
639 {
640 struct hci_rp_read_local_commands *rp = (void *) skb->data;
641
642 BT_DBG("%s status 0x%x", hdev->name, rp->status);
643
644 if (rp->status)
645 goto done;
646
647 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
648
649 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
650 hci_setup_link_policy(hdev);
651
652 done:
653 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
654 }
655
656 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
657 {
658 struct hci_rp_read_local_features *rp = (void *) skb->data;
659
660 BT_DBG("%s status 0x%x", hdev->name, rp->status);
661
662 if (rp->status)
663 return;
664
665 memcpy(hdev->features, rp->features, 8);
666
667 /* Adjust default settings according to features
668 * supported by device. */
669
670 if (hdev->features[0] & LMP_3SLOT)
671 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
672
673 if (hdev->features[0] & LMP_5SLOT)
674 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
675
676 if (hdev->features[1] & LMP_HV2) {
677 hdev->pkt_type |= (HCI_HV2);
678 hdev->esco_type |= (ESCO_HV2);
679 }
680
681 if (hdev->features[1] & LMP_HV3) {
682 hdev->pkt_type |= (HCI_HV3);
683 hdev->esco_type |= (ESCO_HV3);
684 }
685
686 if (hdev->features[3] & LMP_ESCO)
687 hdev->esco_type |= (ESCO_EV3);
688
689 if (hdev->features[4] & LMP_EV4)
690 hdev->esco_type |= (ESCO_EV4);
691
692 if (hdev->features[4] & LMP_EV5)
693 hdev->esco_type |= (ESCO_EV5);
694
695 if (hdev->features[5] & LMP_EDR_ESCO_2M)
696 hdev->esco_type |= (ESCO_2EV3);
697
698 if (hdev->features[5] & LMP_EDR_ESCO_3M)
699 hdev->esco_type |= (ESCO_3EV3);
700
701 if (hdev->features[5] & LMP_EDR_3S_ESCO)
702 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
703
704 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
705 hdev->features[0], hdev->features[1],
706 hdev->features[2], hdev->features[3],
707 hdev->features[4], hdev->features[5],
708 hdev->features[6], hdev->features[7]);
709 }
710
711 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
712 struct sk_buff *skb)
713 {
714 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
715
716 BT_DBG("%s status 0x%x", hdev->name, rp->status);
717
718 if (rp->status)
719 return;
720
721 switch (rp->page) {
722 case 0:
723 memcpy(hdev->features, rp->features, 8);
724 break;
725 case 1:
726 memcpy(hdev->host_features, rp->features, 8);
727 break;
728 }
729
730 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
731 }
732
733 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
734 struct sk_buff *skb)
735 {
736 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
737
738 BT_DBG("%s status 0x%x", hdev->name, rp->status);
739
740 if (rp->status)
741 return;
742
743 hdev->flow_ctl_mode = rp->mode;
744
745 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
746 }
747
748 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
749 {
750 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
751
752 BT_DBG("%s status 0x%x", hdev->name, rp->status);
753
754 if (rp->status)
755 return;
756
757 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
758 hdev->sco_mtu = rp->sco_mtu;
759 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
760 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
761
762 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
763 hdev->sco_mtu = 64;
764 hdev->sco_pkts = 8;
765 }
766
767 hdev->acl_cnt = hdev->acl_pkts;
768 hdev->sco_cnt = hdev->sco_pkts;
769
770 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
771 hdev->acl_mtu, hdev->acl_pkts,
772 hdev->sco_mtu, hdev->sco_pkts);
773 }
774
775 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
776 {
777 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
778
779 BT_DBG("%s status 0x%x", hdev->name, rp->status);
780
781 if (!rp->status)
782 bacpy(&hdev->bdaddr, &rp->bdaddr);
783
784 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
785 }
786
787 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
788 struct sk_buff *skb)
789 {
790 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
791
792 BT_DBG("%s status 0x%x", hdev->name, rp->status);
793
794 if (rp->status)
795 return;
796
797 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
798 hdev->block_len = __le16_to_cpu(rp->block_len);
799 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
800
801 hdev->block_cnt = hdev->num_blocks;
802
803 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
804 hdev->block_cnt, hdev->block_len);
805
806 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
807 }
808
809 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
810 {
811 __u8 status = *((__u8 *) skb->data);
812
813 BT_DBG("%s status 0x%x", hdev->name, status);
814
815 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
816 }
817
818 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
819 struct sk_buff *skb)
820 {
821 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
822
823 BT_DBG("%s status 0x%x", hdev->name, rp->status);
824
825 if (rp->status)
826 return;
827
828 hdev->amp_status = rp->amp_status;
829 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
830 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
831 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
832 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
833 hdev->amp_type = rp->amp_type;
834 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
835 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
836 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
837 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
838
839 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
840 }
841
842 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
843 struct sk_buff *skb)
844 {
845 __u8 status = *((__u8 *) skb->data);
846
847 BT_DBG("%s status 0x%x", hdev->name, status);
848
849 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
850 }
851
852 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
853 {
854 __u8 status = *((__u8 *) skb->data);
855
856 BT_DBG("%s status 0x%x", hdev->name, status);
857
858 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
859 }
860
861 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
862 struct sk_buff *skb)
863 {
864 __u8 status = *((__u8 *) skb->data);
865
866 BT_DBG("%s status 0x%x", hdev->name, status);
867
868 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
869 }
870
871 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
872 struct sk_buff *skb)
873 {
874 __u8 status = *((__u8 *) skb->data);
875
876 BT_DBG("%s status 0x%x", hdev->name, status);
877
878 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
879 }
880
881 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
882 {
883 __u8 status = *((__u8 *) skb->data);
884
885 BT_DBG("%s status 0x%x", hdev->name, status);
886
887 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
888 }
889
890 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
891 {
892 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
893 struct hci_cp_pin_code_reply *cp;
894 struct hci_conn *conn;
895
896 BT_DBG("%s status 0x%x", hdev->name, rp->status);
897
898 hci_dev_lock(hdev);
899
900 if (test_bit(HCI_MGMT, &hdev->dev_flags))
901 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
902
903 if (rp->status != 0)
904 goto unlock;
905
906 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
907 if (!cp)
908 goto unlock;
909
910 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
911 if (conn)
912 conn->pin_length = cp->pin_len;
913
914 unlock:
915 hci_dev_unlock(hdev);
916 }
917
918 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
919 {
920 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
921
922 BT_DBG("%s status 0x%x", hdev->name, rp->status);
923
924 hci_dev_lock(hdev);
925
926 if (test_bit(HCI_MGMT, &hdev->dev_flags))
927 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
928 rp->status);
929
930 hci_dev_unlock(hdev);
931 }
932
933 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
934 struct sk_buff *skb)
935 {
936 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
937
938 BT_DBG("%s status 0x%x", hdev->name, rp->status);
939
940 if (rp->status)
941 return;
942
943 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
944 hdev->le_pkts = rp->le_max_pkt;
945
946 hdev->le_cnt = hdev->le_pkts;
947
948 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
949
950 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
951 }
952
953 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
954 {
955 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
956
957 BT_DBG("%s status 0x%x", hdev->name, rp->status);
958
959 hci_dev_lock(hdev);
960
961 if (test_bit(HCI_MGMT, &hdev->dev_flags))
962 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
963 rp->status);
964
965 hci_dev_unlock(hdev);
966 }
967
968 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
969 struct sk_buff *skb)
970 {
971 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
972
973 BT_DBG("%s status 0x%x", hdev->name, rp->status);
974
975 hci_dev_lock(hdev);
976
977 if (test_bit(HCI_MGMT, &hdev->dev_flags))
978 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
979 rp->status);
980
981 hci_dev_unlock(hdev);
982 }
983
984 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
985 {
986 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
987
988 BT_DBG("%s status 0x%x", hdev->name, rp->status);
989
990 hci_dev_lock(hdev);
991
992 if (test_bit(HCI_MGMT, &hdev->dev_flags))
993 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
994 rp->status);
995
996 hci_dev_unlock(hdev);
997 }
998
999 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1000 struct sk_buff *skb)
1001 {
1002 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1003
1004 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1005
1006 hci_dev_lock(hdev);
1007
1008 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1009 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1010 rp->status);
1011
1012 hci_dev_unlock(hdev);
1013 }
1014
1015 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1016 struct sk_buff *skb)
1017 {
1018 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1019
1020 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1021
1022 hci_dev_lock(hdev);
1023 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1024 rp->randomizer, rp->status);
1025 hci_dev_unlock(hdev);
1026 }
1027
1028 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1029 {
1030 __u8 status = *((__u8 *) skb->data);
1031
1032 BT_DBG("%s status 0x%x", hdev->name, status);
1033 }
1034
1035 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1036 struct sk_buff *skb)
1037 {
1038 struct hci_cp_le_set_scan_enable *cp;
1039 __u8 status = *((__u8 *) skb->data);
1040
1041 BT_DBG("%s status 0x%x", hdev->name, status);
1042
1043 if (status)
1044 return;
1045
1046 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1047 if (!cp)
1048 return;
1049
1050 switch (cp->enable) {
1051 case LE_SCANNING_ENABLED:
1052 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1053
1054 cancel_delayed_work_sync(&hdev->adv_work);
1055
1056 hci_dev_lock(hdev);
1057 hci_adv_entries_clear(hdev);
1058 hci_dev_unlock(hdev);
1059 break;
1060
1061 case LE_SCANNING_DISABLED:
1062 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1063
1064 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1065 break;
1066
1067 default:
1068 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1069 break;
1070 }
1071 }
1072
1073 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1074 {
1075 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1076
1077 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1078
1079 if (rp->status)
1080 return;
1081
1082 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1083 }
1084
1085 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1086 {
1087 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1088
1089 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1090
1091 if (rp->status)
1092 return;
1093
1094 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1095 }
1096
1097 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1098 struct sk_buff *skb)
1099 {
1100 struct hci_cp_read_local_ext_features cp;
1101 __u8 status = *((__u8 *) skb->data);
1102
1103 BT_DBG("%s status 0x%x", hdev->name, status);
1104
1105 if (status)
1106 return;
1107
1108 cp.page = 0x01;
1109 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1110 }
1111
1112 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1113 {
1114 BT_DBG("%s status 0x%x", hdev->name, status);
1115
1116 if (status) {
1117 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1118 hci_conn_check_pending(hdev);
1119 hci_dev_lock(hdev);
1120 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1121 mgmt_start_discovery_failed(hdev, status);
1122 hci_dev_unlock(hdev);
1123 return;
1124 }
1125
1126 set_bit(HCI_INQUIRY, &hdev->flags);
1127
1128 hci_dev_lock(hdev);
1129 hci_discovery_set_state(hdev, DISCOVERY_INQUIRY);
1130 hci_dev_unlock(hdev);
1131 }
1132
1133 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1134 {
1135 struct hci_cp_create_conn *cp;
1136 struct hci_conn *conn;
1137
1138 BT_DBG("%s status 0x%x", hdev->name, status);
1139
1140 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1141 if (!cp)
1142 return;
1143
1144 hci_dev_lock(hdev);
1145
1146 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1147
1148 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1149
1150 if (status) {
1151 if (conn && conn->state == BT_CONNECT) {
1152 if (status != 0x0c || conn->attempt > 2) {
1153 conn->state = BT_CLOSED;
1154 hci_proto_connect_cfm(conn, status);
1155 hci_conn_del(conn);
1156 } else
1157 conn->state = BT_CONNECT2;
1158 }
1159 } else {
1160 if (!conn) {
1161 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1162 if (conn) {
1163 conn->out = true;
1164 conn->link_mode |= HCI_LM_MASTER;
1165 } else
1166 BT_ERR("No memory for new connection");
1167 }
1168 }
1169
1170 hci_dev_unlock(hdev);
1171 }
1172
1173 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1174 {
1175 struct hci_cp_add_sco *cp;
1176 struct hci_conn *acl, *sco;
1177 __u16 handle;
1178
1179 BT_DBG("%s status 0x%x", hdev->name, status);
1180
1181 if (!status)
1182 return;
1183
1184 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1185 if (!cp)
1186 return;
1187
1188 handle = __le16_to_cpu(cp->handle);
1189
1190 BT_DBG("%s handle %d", hdev->name, handle);
1191
1192 hci_dev_lock(hdev);
1193
1194 acl = hci_conn_hash_lookup_handle(hdev, handle);
1195 if (acl) {
1196 sco = acl->link;
1197 if (sco) {
1198 sco->state = BT_CLOSED;
1199
1200 hci_proto_connect_cfm(sco, status);
1201 hci_conn_del(sco);
1202 }
1203 }
1204
1205 hci_dev_unlock(hdev);
1206 }
1207
1208 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1209 {
1210 struct hci_cp_auth_requested *cp;
1211 struct hci_conn *conn;
1212
1213 BT_DBG("%s status 0x%x", hdev->name, status);
1214
1215 if (!status)
1216 return;
1217
1218 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1219 if (!cp)
1220 return;
1221
1222 hci_dev_lock(hdev);
1223
1224 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1225 if (conn) {
1226 if (conn->state == BT_CONFIG) {
1227 hci_proto_connect_cfm(conn, status);
1228 hci_conn_put(conn);
1229 }
1230 }
1231
1232 hci_dev_unlock(hdev);
1233 }
1234
1235 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1236 {
1237 struct hci_cp_set_conn_encrypt *cp;
1238 struct hci_conn *conn;
1239
1240 BT_DBG("%s status 0x%x", hdev->name, status);
1241
1242 if (!status)
1243 return;
1244
1245 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1246 if (!cp)
1247 return;
1248
1249 hci_dev_lock(hdev);
1250
1251 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1252 if (conn) {
1253 if (conn->state == BT_CONFIG) {
1254 hci_proto_connect_cfm(conn, status);
1255 hci_conn_put(conn);
1256 }
1257 }
1258
1259 hci_dev_unlock(hdev);
1260 }
1261
1262 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1263 struct hci_conn *conn)
1264 {
1265 if (conn->state != BT_CONFIG || !conn->out)
1266 return 0;
1267
1268 if (conn->pending_sec_level == BT_SECURITY_SDP)
1269 return 0;
1270
1271 /* Only request authentication for SSP connections or non-SSP
1272 * devices with sec_level HIGH or if MITM protection is requested */
1273 if (!hci_conn_ssp_enabled(conn) &&
1274 conn->pending_sec_level != BT_SECURITY_HIGH &&
1275 !(conn->auth_type & 0x01))
1276 return 0;
1277
1278 return 1;
1279 }
1280
1281 static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e)
1282 {
1283 struct hci_cp_remote_name_req cp;
1284
1285 memset(&cp, 0, sizeof(cp));
1286
1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1288 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1289 cp.pscan_mode = e->data.pscan_mode;
1290 cp.clock_offset = e->data.clock_offset;
1291
1292 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1293 }
1294
1295 static bool hci_resolve_next_name(struct hci_dev *hdev)
1296 {
1297 struct discovery_state *discov = &hdev->discovery;
1298 struct inquiry_entry *e;
1299
1300 if (list_empty(&discov->resolve))
1301 return false;
1302
1303 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1304 if (hci_resolve_name(hdev, e) == 0) {
1305 e->name_state = NAME_PENDING;
1306 return true;
1307 }
1308
1309 return false;
1310 }
1311
1312 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1313 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1314 {
1315 struct discovery_state *discov = &hdev->discovery;
1316 struct inquiry_entry *e;
1317
1318 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1319 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00,
1320 name, name_len, conn->dev_class);
1321
1322 if (discov->state == DISCOVERY_STOPPED)
1323 return;
1324
1325 if (discov->state == DISCOVERY_STOPPING)
1326 goto discov_complete;
1327
1328 if (discov->state != DISCOVERY_RESOLVING)
1329 return;
1330
1331 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1332 if (e) {
1333 e->name_state = NAME_KNOWN;
1334 list_del(&e->list);
1335 if (name)
1336 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1337 e->data.rssi, name, name_len);
1338 }
1339
1340 if (hci_resolve_next_name(hdev))
1341 return;
1342
1343 discov_complete:
1344 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1345 }
1346
1347 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1348 {
1349 struct hci_cp_remote_name_req *cp;
1350 struct hci_conn *conn;
1351
1352 BT_DBG("%s status 0x%x", hdev->name, status);
1353
1354 /* If successful, wait for the name req complete event before
1355 * checking for the need to do authentication */
1356 if (!status)
1357 return;
1358
1359 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1360 if (!cp)
1361 return;
1362
1363 hci_dev_lock(hdev);
1364
1365 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1366
1367 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1368 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1369
1370 if (!conn)
1371 goto unlock;
1372
1373 if (!hci_outgoing_auth_needed(hdev, conn))
1374 goto unlock;
1375
1376 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1377 struct hci_cp_auth_requested cp;
1378 cp.handle = __cpu_to_le16(conn->handle);
1379 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1380 }
1381
1382 unlock:
1383 hci_dev_unlock(hdev);
1384 }
1385
1386 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1387 {
1388 struct hci_cp_read_remote_features *cp;
1389 struct hci_conn *conn;
1390
1391 BT_DBG("%s status 0x%x", hdev->name, status);
1392
1393 if (!status)
1394 return;
1395
1396 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1397 if (!cp)
1398 return;
1399
1400 hci_dev_lock(hdev);
1401
1402 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1403 if (conn) {
1404 if (conn->state == BT_CONFIG) {
1405 hci_proto_connect_cfm(conn, status);
1406 hci_conn_put(conn);
1407 }
1408 }
1409
1410 hci_dev_unlock(hdev);
1411 }
1412
1413 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1414 {
1415 struct hci_cp_read_remote_ext_features *cp;
1416 struct hci_conn *conn;
1417
1418 BT_DBG("%s status 0x%x", hdev->name, status);
1419
1420 if (!status)
1421 return;
1422
1423 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1424 if (!cp)
1425 return;
1426
1427 hci_dev_lock(hdev);
1428
1429 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1430 if (conn) {
1431 if (conn->state == BT_CONFIG) {
1432 hci_proto_connect_cfm(conn, status);
1433 hci_conn_put(conn);
1434 }
1435 }
1436
1437 hci_dev_unlock(hdev);
1438 }
1439
1440 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1441 {
1442 struct hci_cp_setup_sync_conn *cp;
1443 struct hci_conn *acl, *sco;
1444 __u16 handle;
1445
1446 BT_DBG("%s status 0x%x", hdev->name, status);
1447
1448 if (!status)
1449 return;
1450
1451 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1452 if (!cp)
1453 return;
1454
1455 handle = __le16_to_cpu(cp->handle);
1456
1457 BT_DBG("%s handle %d", hdev->name, handle);
1458
1459 hci_dev_lock(hdev);
1460
1461 acl = hci_conn_hash_lookup_handle(hdev, handle);
1462 if (acl) {
1463 sco = acl->link;
1464 if (sco) {
1465 sco->state = BT_CLOSED;
1466
1467 hci_proto_connect_cfm(sco, status);
1468 hci_conn_del(sco);
1469 }
1470 }
1471
1472 hci_dev_unlock(hdev);
1473 }
1474
1475 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1476 {
1477 struct hci_cp_sniff_mode *cp;
1478 struct hci_conn *conn;
1479
1480 BT_DBG("%s status 0x%x", hdev->name, status);
1481
1482 if (!status)
1483 return;
1484
1485 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1486 if (!cp)
1487 return;
1488
1489 hci_dev_lock(hdev);
1490
1491 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1492 if (conn) {
1493 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1494
1495 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1496 hci_sco_setup(conn, status);
1497 }
1498
1499 hci_dev_unlock(hdev);
1500 }
1501
1502 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1503 {
1504 struct hci_cp_exit_sniff_mode *cp;
1505 struct hci_conn *conn;
1506
1507 BT_DBG("%s status 0x%x", hdev->name, status);
1508
1509 if (!status)
1510 return;
1511
1512 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1513 if (!cp)
1514 return;
1515
1516 hci_dev_lock(hdev);
1517
1518 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1519 if (conn) {
1520 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1521
1522 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1523 hci_sco_setup(conn, status);
1524 }
1525
1526 hci_dev_unlock(hdev);
1527 }
1528
1529 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1530 {
1531 struct hci_cp_le_create_conn *cp;
1532 struct hci_conn *conn;
1533
1534 BT_DBG("%s status 0x%x", hdev->name, status);
1535
1536 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1537 if (!cp)
1538 return;
1539
1540 hci_dev_lock(hdev);
1541
1542 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1543
1544 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1545 conn);
1546
1547 if (status) {
1548 if (conn && conn->state == BT_CONNECT) {
1549 conn->state = BT_CLOSED;
1550 hci_proto_connect_cfm(conn, status);
1551 hci_conn_del(conn);
1552 }
1553 } else {
1554 if (!conn) {
1555 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1556 if (conn) {
1557 conn->dst_type = cp->peer_addr_type;
1558 conn->out = true;
1559 } else {
1560 BT_ERR("No memory for new connection");
1561 }
1562 }
1563 }
1564
1565 hci_dev_unlock(hdev);
1566 }
1567
1568 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1569 {
1570 BT_DBG("%s status 0x%x", hdev->name, status);
1571 }
1572
1573 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1574 {
1575 __u8 status = *((__u8 *) skb->data);
1576 struct discovery_state *discov = &hdev->discovery;
1577 struct inquiry_entry *e;
1578
1579 BT_DBG("%s status %d", hdev->name, status);
1580
1581 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1582
1583 hci_conn_check_pending(hdev);
1584
1585 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1586 return;
1587
1588 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1589 return;
1590
1591 hci_dev_lock(hdev);
1592
1593 if (discov->state != DISCOVERY_INQUIRY)
1594 goto unlock;
1595
1596 if (list_empty(&discov->resolve)) {
1597 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1598 goto unlock;
1599 }
1600
1601 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1602 if (e && hci_resolve_name(hdev, e) == 0) {
1603 e->name_state = NAME_PENDING;
1604 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1605 } else {
1606 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1607 }
1608
1609 unlock:
1610 hci_dev_unlock(hdev);
1611 }
1612
1613 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1614 {
1615 struct inquiry_data data;
1616 struct inquiry_info *info = (void *) (skb->data + 1);
1617 int num_rsp = *((__u8 *) skb->data);
1618
1619 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1620
1621 if (!num_rsp)
1622 return;
1623
1624 hci_dev_lock(hdev);
1625
1626 for (; num_rsp; num_rsp--, info++) {
1627 bool name_known;
1628
1629 bacpy(&data.bdaddr, &info->bdaddr);
1630 data.pscan_rep_mode = info->pscan_rep_mode;
1631 data.pscan_period_mode = info->pscan_period_mode;
1632 data.pscan_mode = info->pscan_mode;
1633 memcpy(data.dev_class, info->dev_class, 3);
1634 data.clock_offset = info->clock_offset;
1635 data.rssi = 0x00;
1636 data.ssp_mode = 0x00;
1637
1638 name_known = hci_inquiry_cache_update(hdev, &data, false);
1639 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1640 info->dev_class, 0, !name_known,
1641 NULL, 0);
1642 }
1643
1644 hci_dev_unlock(hdev);
1645 }
1646
1647 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1648 {
1649 struct hci_ev_conn_complete *ev = (void *) skb->data;
1650 struct hci_conn *conn;
1651
1652 BT_DBG("%s", hdev->name);
1653
1654 hci_dev_lock(hdev);
1655
1656 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1657 if (!conn) {
1658 if (ev->link_type != SCO_LINK)
1659 goto unlock;
1660
1661 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1662 if (!conn)
1663 goto unlock;
1664
1665 conn->type = SCO_LINK;
1666 }
1667
1668 if (!ev->status) {
1669 conn->handle = __le16_to_cpu(ev->handle);
1670
1671 if (conn->type == ACL_LINK) {
1672 conn->state = BT_CONFIG;
1673 hci_conn_hold(conn);
1674 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1675 } else
1676 conn->state = BT_CONNECTED;
1677
1678 hci_conn_hold_device(conn);
1679 hci_conn_add_sysfs(conn);
1680
1681 if (test_bit(HCI_AUTH, &hdev->flags))
1682 conn->link_mode |= HCI_LM_AUTH;
1683
1684 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1685 conn->link_mode |= HCI_LM_ENCRYPT;
1686
1687 /* Get remote features */
1688 if (conn->type == ACL_LINK) {
1689 struct hci_cp_read_remote_features cp;
1690 cp.handle = ev->handle;
1691 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1692 sizeof(cp), &cp);
1693 }
1694
1695 /* Set packet type for incoming connection */
1696 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1697 struct hci_cp_change_conn_ptype cp;
1698 cp.handle = ev->handle;
1699 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1700 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1701 sizeof(cp), &cp);
1702 }
1703 } else {
1704 conn->state = BT_CLOSED;
1705 if (conn->type == ACL_LINK)
1706 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1707 conn->dst_type, ev->status);
1708 }
1709
1710 if (conn->type == ACL_LINK)
1711 hci_sco_setup(conn, ev->status);
1712
1713 if (ev->status) {
1714 hci_proto_connect_cfm(conn, ev->status);
1715 hci_conn_del(conn);
1716 } else if (ev->link_type != ACL_LINK)
1717 hci_proto_connect_cfm(conn, ev->status);
1718
1719 unlock:
1720 hci_dev_unlock(hdev);
1721
1722 hci_conn_check_pending(hdev);
1723 }
1724
1725 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1726 {
1727 struct hci_ev_conn_request *ev = (void *) skb->data;
1728 int mask = hdev->link_mode;
1729
1730 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1731 batostr(&ev->bdaddr), ev->link_type);
1732
1733 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1734
1735 if ((mask & HCI_LM_ACCEPT) &&
1736 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1737 /* Connection accepted */
1738 struct inquiry_entry *ie;
1739 struct hci_conn *conn;
1740
1741 hci_dev_lock(hdev);
1742
1743 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1744 if (ie)
1745 memcpy(ie->data.dev_class, ev->dev_class, 3);
1746
1747 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1748 if (!conn) {
1749 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1750 if (!conn) {
1751 BT_ERR("No memory for new connection");
1752 hci_dev_unlock(hdev);
1753 return;
1754 }
1755 }
1756
1757 memcpy(conn->dev_class, ev->dev_class, 3);
1758 conn->state = BT_CONNECT;
1759
1760 hci_dev_unlock(hdev);
1761
1762 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1763 struct hci_cp_accept_conn_req cp;
1764
1765 bacpy(&cp.bdaddr, &ev->bdaddr);
1766
1767 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1768 cp.role = 0x00; /* Become master */
1769 else
1770 cp.role = 0x01; /* Remain slave */
1771
1772 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1773 sizeof(cp), &cp);
1774 } else {
1775 struct hci_cp_accept_sync_conn_req cp;
1776
1777 bacpy(&cp.bdaddr, &ev->bdaddr);
1778 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1779
1780 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1781 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1782 cp.max_latency = cpu_to_le16(0xffff);
1783 cp.content_format = cpu_to_le16(hdev->voice_setting);
1784 cp.retrans_effort = 0xff;
1785
1786 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1787 sizeof(cp), &cp);
1788 }
1789 } else {
1790 /* Connection rejected */
1791 struct hci_cp_reject_conn_req cp;
1792
1793 bacpy(&cp.bdaddr, &ev->bdaddr);
1794 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1795 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1796 }
1797 }
1798
1799 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1800 {
1801 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1802 struct hci_conn *conn;
1803
1804 BT_DBG("%s status %d", hdev->name, ev->status);
1805
1806 hci_dev_lock(hdev);
1807
1808 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1809 if (!conn)
1810 goto unlock;
1811
1812 if (ev->status == 0)
1813 conn->state = BT_CLOSED;
1814
1815 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1816 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1817 if (ev->status != 0)
1818 mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
1819 else
1820 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1821 conn->dst_type);
1822 }
1823
1824 if (ev->status == 0) {
1825 hci_proto_disconn_cfm(conn, ev->reason);
1826 hci_conn_del(conn);
1827 }
1828
1829 unlock:
1830 hci_dev_unlock(hdev);
1831 }
1832
1833 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1834 {
1835 struct hci_ev_auth_complete *ev = (void *) skb->data;
1836 struct hci_conn *conn;
1837
1838 BT_DBG("%s status %d", hdev->name, ev->status);
1839
1840 hci_dev_lock(hdev);
1841
1842 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1843 if (!conn)
1844 goto unlock;
1845
1846 if (!ev->status) {
1847 if (!hci_conn_ssp_enabled(conn) &&
1848 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1849 BT_INFO("re-auth of legacy device is not possible.");
1850 } else {
1851 conn->link_mode |= HCI_LM_AUTH;
1852 conn->sec_level = conn->pending_sec_level;
1853 }
1854 } else {
1855 mgmt_auth_failed(hdev, &conn->dst, ev->status);
1856 }
1857
1858 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1859 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1860
1861 if (conn->state == BT_CONFIG) {
1862 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1863 struct hci_cp_set_conn_encrypt cp;
1864 cp.handle = ev->handle;
1865 cp.encrypt = 0x01;
1866 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1867 &cp);
1868 } else {
1869 conn->state = BT_CONNECTED;
1870 hci_proto_connect_cfm(conn, ev->status);
1871 hci_conn_put(conn);
1872 }
1873 } else {
1874 hci_auth_cfm(conn, ev->status);
1875
1876 hci_conn_hold(conn);
1877 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1878 hci_conn_put(conn);
1879 }
1880
1881 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1882 if (!ev->status) {
1883 struct hci_cp_set_conn_encrypt cp;
1884 cp.handle = ev->handle;
1885 cp.encrypt = 0x01;
1886 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1887 &cp);
1888 } else {
1889 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1890 hci_encrypt_cfm(conn, ev->status, 0x00);
1891 }
1892 }
1893
1894 unlock:
1895 hci_dev_unlock(hdev);
1896 }
1897
1898 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1899 {
1900 struct hci_ev_remote_name *ev = (void *) skb->data;
1901 struct hci_conn *conn;
1902
1903 BT_DBG("%s", hdev->name);
1904
1905 hci_conn_check_pending(hdev);
1906
1907 hci_dev_lock(hdev);
1908
1909 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1910
1911 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1912 goto check_auth;
1913
1914 if (ev->status == 0)
1915 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1916 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1917 else
1918 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1919
1920 check_auth:
1921 if (!conn)
1922 goto unlock;
1923
1924 if (!hci_outgoing_auth_needed(hdev, conn))
1925 goto unlock;
1926
1927 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1928 struct hci_cp_auth_requested cp;
1929 cp.handle = __cpu_to_le16(conn->handle);
1930 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1931 }
1932
1933 unlock:
1934 hci_dev_unlock(hdev);
1935 }
1936
1937 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1938 {
1939 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1940 struct hci_conn *conn;
1941
1942 BT_DBG("%s status %d", hdev->name, ev->status);
1943
1944 hci_dev_lock(hdev);
1945
1946 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1947 if (conn) {
1948 if (!ev->status) {
1949 if (ev->encrypt) {
1950 /* Encryption implies authentication */
1951 conn->link_mode |= HCI_LM_AUTH;
1952 conn->link_mode |= HCI_LM_ENCRYPT;
1953 conn->sec_level = conn->pending_sec_level;
1954 } else
1955 conn->link_mode &= ~HCI_LM_ENCRYPT;
1956 }
1957
1958 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1959
1960 if (conn->state == BT_CONFIG) {
1961 if (!ev->status)
1962 conn->state = BT_CONNECTED;
1963
1964 hci_proto_connect_cfm(conn, ev->status);
1965 hci_conn_put(conn);
1966 } else
1967 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1968 }
1969
1970 hci_dev_unlock(hdev);
1971 }
1972
1973 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1974 {
1975 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1976 struct hci_conn *conn;
1977
1978 BT_DBG("%s status %d", hdev->name, ev->status);
1979
1980 hci_dev_lock(hdev);
1981
1982 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1983 if (conn) {
1984 if (!ev->status)
1985 conn->link_mode |= HCI_LM_SECURE;
1986
1987 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1988
1989 hci_key_change_cfm(conn, ev->status);
1990 }
1991
1992 hci_dev_unlock(hdev);
1993 }
1994
1995 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1996 {
1997 struct hci_ev_remote_features *ev = (void *) skb->data;
1998 struct hci_conn *conn;
1999
2000 BT_DBG("%s status %d", hdev->name, ev->status);
2001
2002 hci_dev_lock(hdev);
2003
2004 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2005 if (!conn)
2006 goto unlock;
2007
2008 if (!ev->status)
2009 memcpy(conn->features, ev->features, 8);
2010
2011 if (conn->state != BT_CONFIG)
2012 goto unlock;
2013
2014 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2015 struct hci_cp_read_remote_ext_features cp;
2016 cp.handle = ev->handle;
2017 cp.page = 0x01;
2018 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2019 sizeof(cp), &cp);
2020 goto unlock;
2021 }
2022
2023 if (!ev->status) {
2024 struct hci_cp_remote_name_req cp;
2025 memset(&cp, 0, sizeof(cp));
2026 bacpy(&cp.bdaddr, &conn->dst);
2027 cp.pscan_rep_mode = 0x02;
2028 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2029 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2030 mgmt_device_connected(hdev, &conn->dst, conn->type,
2031 conn->dst_type, NULL, 0,
2032 conn->dev_class);
2033
2034 if (!hci_outgoing_auth_needed(hdev, conn)) {
2035 conn->state = BT_CONNECTED;
2036 hci_proto_connect_cfm(conn, ev->status);
2037 hci_conn_put(conn);
2038 }
2039
2040 unlock:
2041 hci_dev_unlock(hdev);
2042 }
2043
2044 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2045 {
2046 BT_DBG("%s", hdev->name);
2047 }
2048
2049 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2050 {
2051 BT_DBG("%s", hdev->name);
2052 }
2053
2054 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2055 {
2056 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2057 __u16 opcode;
2058
2059 skb_pull(skb, sizeof(*ev));
2060
2061 opcode = __le16_to_cpu(ev->opcode);
2062
2063 switch (opcode) {
2064 case HCI_OP_INQUIRY_CANCEL:
2065 hci_cc_inquiry_cancel(hdev, skb);
2066 break;
2067
2068 case HCI_OP_EXIT_PERIODIC_INQ:
2069 hci_cc_exit_periodic_inq(hdev, skb);
2070 break;
2071
2072 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2073 hci_cc_remote_name_req_cancel(hdev, skb);
2074 break;
2075
2076 case HCI_OP_ROLE_DISCOVERY:
2077 hci_cc_role_discovery(hdev, skb);
2078 break;
2079
2080 case HCI_OP_READ_LINK_POLICY:
2081 hci_cc_read_link_policy(hdev, skb);
2082 break;
2083
2084 case HCI_OP_WRITE_LINK_POLICY:
2085 hci_cc_write_link_policy(hdev, skb);
2086 break;
2087
2088 case HCI_OP_READ_DEF_LINK_POLICY:
2089 hci_cc_read_def_link_policy(hdev, skb);
2090 break;
2091
2092 case HCI_OP_WRITE_DEF_LINK_POLICY:
2093 hci_cc_write_def_link_policy(hdev, skb);
2094 break;
2095
2096 case HCI_OP_RESET:
2097 hci_cc_reset(hdev, skb);
2098 break;
2099
2100 case HCI_OP_WRITE_LOCAL_NAME:
2101 hci_cc_write_local_name(hdev, skb);
2102 break;
2103
2104 case HCI_OP_READ_LOCAL_NAME:
2105 hci_cc_read_local_name(hdev, skb);
2106 break;
2107
2108 case HCI_OP_WRITE_AUTH_ENABLE:
2109 hci_cc_write_auth_enable(hdev, skb);
2110 break;
2111
2112 case HCI_OP_WRITE_ENCRYPT_MODE:
2113 hci_cc_write_encrypt_mode(hdev, skb);
2114 break;
2115
2116 case HCI_OP_WRITE_SCAN_ENABLE:
2117 hci_cc_write_scan_enable(hdev, skb);
2118 break;
2119
2120 case HCI_OP_READ_CLASS_OF_DEV:
2121 hci_cc_read_class_of_dev(hdev, skb);
2122 break;
2123
2124 case HCI_OP_WRITE_CLASS_OF_DEV:
2125 hci_cc_write_class_of_dev(hdev, skb);
2126 break;
2127
2128 case HCI_OP_READ_VOICE_SETTING:
2129 hci_cc_read_voice_setting(hdev, skb);
2130 break;
2131
2132 case HCI_OP_WRITE_VOICE_SETTING:
2133 hci_cc_write_voice_setting(hdev, skb);
2134 break;
2135
2136 case HCI_OP_HOST_BUFFER_SIZE:
2137 hci_cc_host_buffer_size(hdev, skb);
2138 break;
2139
2140 case HCI_OP_READ_SSP_MODE:
2141 hci_cc_read_ssp_mode(hdev, skb);
2142 break;
2143
2144 case HCI_OP_WRITE_SSP_MODE:
2145 hci_cc_write_ssp_mode(hdev, skb);
2146 break;
2147
2148 case HCI_OP_READ_LOCAL_VERSION:
2149 hci_cc_read_local_version(hdev, skb);
2150 break;
2151
2152 case HCI_OP_READ_LOCAL_COMMANDS:
2153 hci_cc_read_local_commands(hdev, skb);
2154 break;
2155
2156 case HCI_OP_READ_LOCAL_FEATURES:
2157 hci_cc_read_local_features(hdev, skb);
2158 break;
2159
2160 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2161 hci_cc_read_local_ext_features(hdev, skb);
2162 break;
2163
2164 case HCI_OP_READ_BUFFER_SIZE:
2165 hci_cc_read_buffer_size(hdev, skb);
2166 break;
2167
2168 case HCI_OP_READ_BD_ADDR:
2169 hci_cc_read_bd_addr(hdev, skb);
2170 break;
2171
2172 case HCI_OP_READ_DATA_BLOCK_SIZE:
2173 hci_cc_read_data_block_size(hdev, skb);
2174 break;
2175
2176 case HCI_OP_WRITE_CA_TIMEOUT:
2177 hci_cc_write_ca_timeout(hdev, skb);
2178 break;
2179
2180 case HCI_OP_READ_FLOW_CONTROL_MODE:
2181 hci_cc_read_flow_control_mode(hdev, skb);
2182 break;
2183
2184 case HCI_OP_READ_LOCAL_AMP_INFO:
2185 hci_cc_read_local_amp_info(hdev, skb);
2186 break;
2187
2188 case HCI_OP_DELETE_STORED_LINK_KEY:
2189 hci_cc_delete_stored_link_key(hdev, skb);
2190 break;
2191
2192 case HCI_OP_SET_EVENT_MASK:
2193 hci_cc_set_event_mask(hdev, skb);
2194 break;
2195
2196 case HCI_OP_WRITE_INQUIRY_MODE:
2197 hci_cc_write_inquiry_mode(hdev, skb);
2198 break;
2199
2200 case HCI_OP_READ_INQ_RSP_TX_POWER:
2201 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2202 break;
2203
2204 case HCI_OP_SET_EVENT_FLT:
2205 hci_cc_set_event_flt(hdev, skb);
2206 break;
2207
2208 case HCI_OP_PIN_CODE_REPLY:
2209 hci_cc_pin_code_reply(hdev, skb);
2210 break;
2211
2212 case HCI_OP_PIN_CODE_NEG_REPLY:
2213 hci_cc_pin_code_neg_reply(hdev, skb);
2214 break;
2215
2216 case HCI_OP_READ_LOCAL_OOB_DATA:
2217 hci_cc_read_local_oob_data_reply(hdev, skb);
2218 break;
2219
2220 case HCI_OP_LE_READ_BUFFER_SIZE:
2221 hci_cc_le_read_buffer_size(hdev, skb);
2222 break;
2223
2224 case HCI_OP_USER_CONFIRM_REPLY:
2225 hci_cc_user_confirm_reply(hdev, skb);
2226 break;
2227
2228 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2229 hci_cc_user_confirm_neg_reply(hdev, skb);
2230 break;
2231
2232 case HCI_OP_USER_PASSKEY_REPLY:
2233 hci_cc_user_passkey_reply(hdev, skb);
2234 break;
2235
2236 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2237 hci_cc_user_passkey_neg_reply(hdev, skb);
2238 		break;

2239 case HCI_OP_LE_SET_SCAN_PARAM:
2240 hci_cc_le_set_scan_param(hdev, skb);
2241 break;
2242
2243 case HCI_OP_LE_SET_SCAN_ENABLE:
2244 hci_cc_le_set_scan_enable(hdev, skb);
2245 break;
2246
2247 case HCI_OP_LE_LTK_REPLY:
2248 hci_cc_le_ltk_reply(hdev, skb);
2249 break;
2250
2251 case HCI_OP_LE_LTK_NEG_REPLY:
2252 hci_cc_le_ltk_neg_reply(hdev, skb);
2253 break;
2254
2255 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2256 hci_cc_write_le_host_supported(hdev, skb);
2257 break;
2258
2259 default:
2260 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2261 break;
2262 }
2263
2264 if (ev->opcode != HCI_OP_NOP)
2265 del_timer(&hdev->cmd_timer);
2266
2267 if (ev->ncmd) {
2268 atomic_set(&hdev->cmd_cnt, 1);
2269 if (!skb_queue_empty(&hdev->cmd_q))
2270 queue_work(hdev->workqueue, &hdev->cmd_work);
2271 }
2272 }
2273
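/* Command Status event: like Command Complete, but for commands that
 * only report whether they were accepted. Dispatch to the hci_cs_*
 * handlers and restart the command queue when credits are reported,
 * unless a controller reset is in progress. */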
2274 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2275 {
2276 struct hci_ev_cmd_status *ev = (void *) skb->data;
2277 __u16 opcode;
2278
2279 skb_pull(skb, sizeof(*ev));
2280
2281 opcode = __le16_to_cpu(ev->opcode);
2282
2283 switch (opcode) {
2284 case HCI_OP_INQUIRY:
2285 hci_cs_inquiry(hdev, ev->status);
2286 break;
2287
2288 case HCI_OP_CREATE_CONN:
2289 hci_cs_create_conn(hdev, ev->status);
2290 break;
2291
2292 case HCI_OP_ADD_SCO:
2293 hci_cs_add_sco(hdev, ev->status);
2294 break;
2295
2296 case HCI_OP_AUTH_REQUESTED:
2297 hci_cs_auth_requested(hdev, ev->status);
2298 break;
2299
2300 case HCI_OP_SET_CONN_ENCRYPT:
2301 hci_cs_set_conn_encrypt(hdev, ev->status);
2302 break;
2303
2304 case HCI_OP_REMOTE_NAME_REQ:
2305 hci_cs_remote_name_req(hdev, ev->status);
2306 break;
2307
2308 case HCI_OP_READ_REMOTE_FEATURES:
2309 hci_cs_read_remote_features(hdev, ev->status);
2310 break;
2311
2312 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2313 hci_cs_read_remote_ext_features(hdev, ev->status);
2314 break;
2315
2316 case HCI_OP_SETUP_SYNC_CONN:
2317 hci_cs_setup_sync_conn(hdev, ev->status);
2318 break;
2319
2320 case HCI_OP_SNIFF_MODE:
2321 hci_cs_sniff_mode(hdev, ev->status);
2322 break;
2323
2324 case HCI_OP_EXIT_SNIFF_MODE:
2325 hci_cs_exit_sniff_mode(hdev, ev->status);
2326 break;
2327
2328 case HCI_OP_DISCONNECT:
2329 if (ev->status != 0)
2330 mgmt_disconnect_failed(hdev, NULL, ev->status);
2331 break;
2332
2333 case HCI_OP_LE_CREATE_CONN:
2334 hci_cs_le_create_conn(hdev, ev->status);
2335 break;
2336
2337 case HCI_OP_LE_START_ENC:
2338 hci_cs_le_start_enc(hdev, ev->status);
2339 break;
2340
2341 default:
2342 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2343 break;
2344 }
2345
2346 if (ev->opcode != HCI_OP_NOP)
2347 del_timer(&hdev->cmd_timer);
2348
2349 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2350 atomic_set(&hdev->cmd_cnt, 1);
2351 if (!skb_queue_empty(&hdev->cmd_q))
2352 queue_work(hdev->workqueue, &hdev->cmd_work);
2353 }
2354 }
2355
2356 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2357 {
2358 struct hci_ev_role_change *ev = (void *) skb->data;
2359 struct hci_conn *conn;
2360
2361 BT_DBG("%s status %d", hdev->name, ev->status);
2362
2363 hci_dev_lock(hdev);
2364
2365 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2366 if (conn) {
2367 if (!ev->status) {
2368 if (ev->role)
2369 conn->link_mode &= ~HCI_LM_MASTER;
2370 else
2371 conn->link_mode |= HCI_LM_MASTER;
2372 }
2373
2374 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2375
2376 hci_role_switch_cfm(conn, ev->status, ev->role);
2377 }
2378
2379 hci_dev_unlock(hdev);
2380 }
2381
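/* Number of Completed Packets event (packet-based flow control):
 * return transmit credits to the per-type counters for every reported
 * connection handle. LE links fall back to the ACL pool when the
 * controller has no dedicated LE buffers. Counters are clamped to the
 * controller-reported maximums and the TX work is rescheduled. */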
2382 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2383 {
2384 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2385 int i;
2386
2387 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2388 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2389 return;
2390 }
2391
2392 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2393 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2394 BT_DBG("%s bad parameters", hdev->name);
2395 return;
2396 }
2397
2398 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2399
2400 for (i = 0; i < ev->num_hndl; i++) {
2401 struct hci_comp_pkts_info *info = &ev->handles[i];
2402 struct hci_conn *conn;
2403 __u16 handle, count;
2404
2405 handle = __le16_to_cpu(info->handle);
2406 count = __le16_to_cpu(info->count);
2407
2408 conn = hci_conn_hash_lookup_handle(hdev, handle);
2409 if (!conn)
2410 continue;
2411
2412 conn->sent -= count;
2413
2414 switch (conn->type) {
2415 case ACL_LINK:
2416 hdev->acl_cnt += count;
2417 if (hdev->acl_cnt > hdev->acl_pkts)
2418 hdev->acl_cnt = hdev->acl_pkts;
2419 break;
2420
2421 case LE_LINK:
2422 if (hdev->le_pkts) {
2423 hdev->le_cnt += count;
2424 if (hdev->le_cnt > hdev->le_pkts)
2425 hdev->le_cnt = hdev->le_pkts;
2426 } else {
2427 hdev->acl_cnt += count;
2428 if (hdev->acl_cnt > hdev->acl_pkts)
2429 hdev->acl_cnt = hdev->acl_pkts;
2430 }
2431 break;
2432
2433 case SCO_LINK:
2434 hdev->sco_cnt += count;
2435 if (hdev->sco_cnt > hdev->sco_pkts)
2436 hdev->sco_cnt = hdev->sco_pkts;
2437 break;
2438
2439 default:
2440 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2441 break;
2442 }
2443 }
2444
2445 queue_work(hdev->workqueue, &hdev->tx_work);
2446 }
2447
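/* Number of Completed Data Blocks event: the block-based flow control
 * counterpart of the handler above, only valid when the controller
 * uses HCI_FLOW_CTL_MODE_BLOCK_BASED. */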
2448 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2449 struct sk_buff *skb)
2450 {
2451 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2452 int i;
2453
2454 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2455 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2456 return;
2457 }
2458
2459 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2460 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2461 BT_DBG("%s bad parameters", hdev->name);
2462 return;
2463 }
2464
2465 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2466 ev->num_hndl);
2467
2468 for (i = 0; i < ev->num_hndl; i++) {
2469 struct hci_comp_blocks_info *info = &ev->handles[i];
2470 struct hci_conn *conn;
2471 __u16 handle, block_count;
2472
2473 handle = __le16_to_cpu(info->handle);
2474 block_count = __le16_to_cpu(info->blocks);
2475
2476 conn = hci_conn_hash_lookup_handle(hdev, handle);
2477 if (!conn)
2478 continue;
2479
2480 conn->sent -= block_count;
2481
2482 switch (conn->type) {
2483 case ACL_LINK:
2484 hdev->block_cnt += block_count;
2485 if (hdev->block_cnt > hdev->num_blocks)
2486 hdev->block_cnt = hdev->num_blocks;
2487 break;
2488
2489 default:
2490 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2491 break;
2492 }
2493 }
2494
2495 queue_work(hdev->workqueue, &hdev->tx_work);
2496 }
2497
2498 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2499 {
2500 struct hci_ev_mode_change *ev = (void *) skb->data;
2501 struct hci_conn *conn;
2502
2503 BT_DBG("%s status %d", hdev->name, ev->status);
2504
2505 hci_dev_lock(hdev);
2506
2507 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2508 if (conn) {
2509 conn->mode = ev->mode;
2510 conn->interval = __le16_to_cpu(ev->interval);
2511
2512 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2513 if (conn->mode == HCI_CM_ACTIVE)
2514 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2515 else
2516 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2517 }
2518
2519 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2520 hci_sco_setup(conn, ev->status);
2521 }
2522
2523 hci_dev_unlock(hdev);
2524 }
2525
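/* PIN Code Request event (legacy pairing): refuse with a negative
 * reply when the device is not pairable, otherwise forward the
 * request to user space via mgmt, hinting that a secure PIN is
 * required when high security is pending. */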
2526 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2527 {
2528 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2529 struct hci_conn *conn;
2530
2531 BT_DBG("%s", hdev->name);
2532
2533 hci_dev_lock(hdev);
2534
2535 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2536 if (!conn)
2537 goto unlock;
2538
2539 if (conn->state == BT_CONNECTED) {
2540 hci_conn_hold(conn);
2541 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2542 hci_conn_put(conn);
2543 }
2544
2545 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2546 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2547 sizeof(ev->bdaddr), &ev->bdaddr);
2548 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2549 u8 secure;
2550
2551 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2552 secure = 1;
2553 else
2554 secure = 0;
2555
2556 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2557 }
2558
2559 unlock:
2560 hci_dev_unlock(hdev);
2561 }
2562
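/* Link Key Request event: look up a stored link key for the peer and
 * reply with it. Debug keys are ignored unless debug keys are
 * enabled, and unauthenticated or short combination keys are rejected
 * when the pending security level requires more; in those cases a
 * negative reply is sent instead. */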
2563 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2564 {
2565 struct hci_ev_link_key_req *ev = (void *) skb->data;
2566 struct hci_cp_link_key_reply cp;
2567 struct hci_conn *conn;
2568 struct link_key *key;
2569
2570 BT_DBG("%s", hdev->name);
2571
2572 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2573 return;
2574
2575 hci_dev_lock(hdev);
2576
2577 key = hci_find_link_key(hdev, &ev->bdaddr);
2578 if (!key) {
2579 BT_DBG("%s link key not found for %s", hdev->name,
2580 batostr(&ev->bdaddr));
2581 goto not_found;
2582 }
2583
2584 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2585 batostr(&ev->bdaddr));
2586
2587 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2588 key->type == HCI_LK_DEBUG_COMBINATION) {
2589 BT_DBG("%s ignoring debug key", hdev->name);
2590 goto not_found;
2591 }
2592
2593 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2594 if (conn) {
2595 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2596 conn->auth_type != 0xff &&
2597 (conn->auth_type & 0x01)) {
2598 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2599 goto not_found;
2600 }
2601
2602 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2603 conn->pending_sec_level == BT_SECURITY_HIGH) {
2604 			BT_DBG("%s ignoring key unauthenticated for high security",
2605 			       hdev->name);
2606 goto not_found;
2607 }
2608
2609 conn->key_type = key->type;
2610 conn->pin_length = key->pin_len;
2611 }
2612
2613 bacpy(&cp.bdaddr, &ev->bdaddr);
2614 memcpy(cp.link_key, key->val, 16);
2615
2616 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2617
2618 hci_dev_unlock(hdev);
2619
2620 return;
2621
2622 not_found:
2623 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2624 hci_dev_unlock(hdev);
2625 }
2626
2627 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2628 {
2629 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2630 struct hci_conn *conn;
2631 u8 pin_len = 0;
2632
2633 BT_DBG("%s", hdev->name);
2634
2635 hci_dev_lock(hdev);
2636
2637 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2638 if (conn) {
2639 hci_conn_hold(conn);
2640 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2641 pin_len = conn->pin_length;
2642
2643 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2644 conn->key_type = ev->key_type;
2645
2646 hci_conn_put(conn);
2647 }
2648
2649 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2650 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2651 ev->key_type, pin_len);
2652
2653 hci_dev_unlock(hdev);
2654 }
2655
2656 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2657 {
2658 struct hci_ev_clock_offset *ev = (void *) skb->data;
2659 struct hci_conn *conn;
2660
2661 BT_DBG("%s status %d", hdev->name, ev->status);
2662
2663 hci_dev_lock(hdev);
2664
2665 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2666 if (conn && !ev->status) {
2667 struct inquiry_entry *ie;
2668
2669 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2670 if (ie) {
2671 ie->data.clock_offset = ev->clock_offset;
2672 ie->timestamp = jiffies;
2673 }
2674 }
2675
2676 hci_dev_unlock(hdev);
2677 }
2678
2679 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2680 {
2681 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2682 struct hci_conn *conn;
2683
2684 BT_DBG("%s status %d", hdev->name, ev->status);
2685
2686 hci_dev_lock(hdev);
2687
2688 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2689 if (conn && !ev->status)
2690 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2691
2692 hci_dev_unlock(hdev);
2693 }
2694
2695 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2696 {
2697 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2698 struct inquiry_entry *ie;
2699
2700 BT_DBG("%s", hdev->name);
2701
2702 hci_dev_lock(hdev);
2703
2704 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2705 if (ie) {
2706 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2707 ie->timestamp = jiffies;
2708 }
2709
2710 hci_dev_unlock(hdev);
2711 }
2712
2713 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2714 {
2715 struct inquiry_data data;
2716 int num_rsp = *((__u8 *) skb->data);
2717 bool name_known;
2718
2719 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2720
2721 if (!num_rsp)
2722 return;
2723
2724 hci_dev_lock(hdev);
2725
2726 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2727 struct inquiry_info_with_rssi_and_pscan_mode *info;
2728 info = (void *) (skb->data + 1);
2729
2730 for (; num_rsp; num_rsp--, info++) {
2731 bacpy(&data.bdaddr, &info->bdaddr);
2732 data.pscan_rep_mode = info->pscan_rep_mode;
2733 data.pscan_period_mode = info->pscan_period_mode;
2734 data.pscan_mode = info->pscan_mode;
2735 memcpy(data.dev_class, info->dev_class, 3);
2736 data.clock_offset = info->clock_offset;
2737 data.rssi = info->rssi;
2738 data.ssp_mode = 0x00;
2739
2740 name_known = hci_inquiry_cache_update(hdev, &data,
2741 false);
2742 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2743 info->dev_class, info->rssi,
2744 !name_known, NULL, 0);
2745 }
2746 } else {
2747 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2748
2749 for (; num_rsp; num_rsp--, info++) {
2750 bacpy(&data.bdaddr, &info->bdaddr);
2751 data.pscan_rep_mode = info->pscan_rep_mode;
2752 data.pscan_period_mode = info->pscan_period_mode;
2753 data.pscan_mode = 0x00;
2754 memcpy(data.dev_class, info->dev_class, 3);
2755 data.clock_offset = info->clock_offset;
2756 data.rssi = info->rssi;
2757 data.ssp_mode = 0x00;
2758 name_known = hci_inquiry_cache_update(hdev, &data,
2759 false);
2760 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2761 info->dev_class, info->rssi,
2762 !name_known, NULL, 0);
2763 }
2764 }
2765
2766 hci_dev_unlock(hdev);
2767 }
2768
2769 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2770 {
2771 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2772 struct hci_conn *conn;
2773
2774 BT_DBG("%s", hdev->name);
2775
2776 hci_dev_lock(hdev);
2777
2778 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2779 if (!conn)
2780 goto unlock;
2781
2782 if (!ev->status && ev->page == 0x01) {
2783 struct inquiry_entry *ie;
2784
2785 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2786 if (ie)
2787 ie->data.ssp_mode = (ev->features[0] & 0x01);
2788
2789 if (ev->features[0] & 0x01)
2790 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2791 }
2792
2793 if (conn->state != BT_CONFIG)
2794 goto unlock;
2795
2796 if (!ev->status) {
2797 struct hci_cp_remote_name_req cp;
2798 memset(&cp, 0, sizeof(cp));
2799 bacpy(&cp.bdaddr, &conn->dst);
2800 cp.pscan_rep_mode = 0x02;
2801 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2802 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2803 mgmt_device_connected(hdev, &conn->dst, conn->type,
2804 conn->dst_type, NULL, 0,
2805 conn->dev_class);
2806
2807 if (!hci_outgoing_auth_needed(hdev, conn)) {
2808 conn->state = BT_CONNECTED;
2809 hci_proto_connect_cfm(conn, ev->status);
2810 hci_conn_put(conn);
2811 }
2812
2813 unlock:
2814 hci_dev_unlock(hdev);
2815 }
2816
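/* Synchronous Connection Complete event: finish SCO/eSCO setup. For a
 * handful of failure codes an outgoing eSCO attempt is retried once
 * with a downgraded packet type before the connection is torn down. */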
2817 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2818 {
2819 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2820 struct hci_conn *conn;
2821
2822 BT_DBG("%s status %d", hdev->name, ev->status);
2823
2824 hci_dev_lock(hdev);
2825
2826 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2827 if (!conn) {
2828 if (ev->link_type == ESCO_LINK)
2829 goto unlock;
2830
2831 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2832 if (!conn)
2833 goto unlock;
2834
2835 conn->type = SCO_LINK;
2836 }
2837
2838 switch (ev->status) {
2839 case 0x00:
2840 conn->handle = __le16_to_cpu(ev->handle);
2841 conn->state = BT_CONNECTED;
2842
2843 hci_conn_hold_device(conn);
2844 hci_conn_add_sysfs(conn);
2845 break;
2846
2847 case 0x11: /* Unsupported Feature or Parameter Value */
2848 case 0x1c: /* SCO interval rejected */
2849 case 0x1a: /* Unsupported Remote Feature */
2850 case 0x1f: /* Unspecified error */
2851 if (conn->out && conn->attempt < 2) {
2852 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2853 (hdev->esco_type & EDR_ESCO_MASK);
2854 hci_setup_sync(conn, conn->link->handle);
2855 goto unlock;
2856 }
2857 /* fall through */
2858
2859 default:
2860 conn->state = BT_CLOSED;
2861 break;
2862 }
2863
2864 hci_proto_connect_cfm(conn, ev->status);
2865 if (ev->status)
2866 hci_conn_del(conn);
2867
2868 unlock:
2869 hci_dev_unlock(hdev);
2870 }
2871
2872 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2873 {
2874 BT_DBG("%s", hdev->name);
2875 }
2876
2877 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2878 {
2879 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2880
2881 BT_DBG("%s status %d", hdev->name, ev->status);
2882 }
2883
2884 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2885 {
2886 struct inquiry_data data;
2887 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2888 int num_rsp = *((__u8 *) skb->data);
2889
2890 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2891
2892 if (!num_rsp)
2893 return;
2894
2895 hci_dev_lock(hdev);
2896
2897 for (; num_rsp; num_rsp--, info++) {
2898 bool name_known;
2899
2900 bacpy(&data.bdaddr, &info->bdaddr);
2901 data.pscan_rep_mode = info->pscan_rep_mode;
2902 data.pscan_period_mode = info->pscan_period_mode;
2903 data.pscan_mode = 0x00;
2904 memcpy(data.dev_class, info->dev_class, 3);
2905 data.clock_offset = info->clock_offset;
2906 data.rssi = info->rssi;
2907 data.ssp_mode = 0x01;
2908
2909 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2910 name_known = eir_has_data_type(info->data,
2911 sizeof(info->data),
2912 EIR_NAME_COMPLETE);
2913 else
2914 name_known = true;
2915
2916 name_known = hci_inquiry_cache_update(hdev, &data, name_known);
2917 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2918 info->dev_class, info->rssi,
2919 !name_known, info->data,
2920 sizeof(info->data));
2921 }
2922
2923 hci_dev_unlock(hdev);
2924 }
2925
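/* Map the local and remote authentication requirements to the value
 * used in the IO Capability Reply: follow the remote side for
 * dedicated bonding (requiring MITM only if both sides can support
 * it) and for no-bonding, otherwise keep the locally requested
 * authentication type. */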
2926 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2927 {
2928 /* If remote requests dedicated bonding follow that lead */
2929 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2930 /* If both remote and local IO capabilities allow MITM
2931 * protection then require it, otherwise don't */
2932 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2933 return 0x02;
2934 else
2935 return 0x03;
2936 }
2937
2938 /* If remote requests no-bonding follow that lead */
2939 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2940 return conn->remote_auth | (conn->auth_type & 0x01);
2941
2942 return conn->auth_type;
2943 }
2944
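/* IO Capability Request event: if pairing is acceptable, reply with
 * our IO capability, the negotiated authentication requirement and
 * whether remote OOB data is available; otherwise send a negative
 * reply with "pairing not allowed". */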
2945 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2946 {
2947 struct hci_ev_io_capa_request *ev = (void *) skb->data;
2948 struct hci_conn *conn;
2949
2950 BT_DBG("%s", hdev->name);
2951
2952 hci_dev_lock(hdev);
2953
2954 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2955 if (!conn)
2956 goto unlock;
2957
2958 hci_conn_hold(conn);
2959
2960 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2961 goto unlock;
2962
2963 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
2964 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2965 struct hci_cp_io_capability_reply cp;
2966
2967 bacpy(&cp.bdaddr, &ev->bdaddr);
2968 		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
2969 		 * since KeyboardDisplay is not supported by the BT spec for this reply. */
2970 cp.capability = (conn->io_capability == 0x04) ?
2971 0x01 : conn->io_capability;
2972 conn->auth_type = hci_get_auth_req(conn);
2973 cp.authentication = conn->auth_type;
2974
2975 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
2976 hci_find_remote_oob_data(hdev, &conn->dst))
2977 cp.oob_data = 0x01;
2978 else
2979 cp.oob_data = 0x00;
2980
2981 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2982 sizeof(cp), &cp);
2983 } else {
2984 struct hci_cp_io_capability_neg_reply cp;
2985
2986 bacpy(&cp.bdaddr, &ev->bdaddr);
2987 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
2988
2989 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2990 sizeof(cp), &cp);
2991 }
2992
2993 unlock:
2994 hci_dev_unlock(hdev);
2995 }
2996
2997 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2998 {
2999 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3000 struct hci_conn *conn;
3001
3002 BT_DBG("%s", hdev->name);
3003
3004 hci_dev_lock(hdev);
3005
3006 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3007 if (!conn)
3008 goto unlock;
3009
3010 conn->remote_cap = ev->capability;
3011 conn->remote_auth = ev->authentication;
3012 if (ev->oob_data)
3013 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3014
3015 unlock:
3016 hci_dev_unlock(hdev);
3017 }
3018
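/* User Confirmation Request event (numeric comparison): reject when
 * we need MITM protection that the remote side cannot provide. When
 * neither side needs MITM the request is auto-accepted, either
 * immediately, after the configured auto-accept delay, or via user
 * space with confirm_hint set when we are only the acceptor.
 * Otherwise the passkey is handed to user space for confirmation. */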
3019 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3020 struct sk_buff *skb)
3021 {
3022 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3023 int loc_mitm, rem_mitm, confirm_hint = 0;
3024 struct hci_conn *conn;
3025
3026 BT_DBG("%s", hdev->name);
3027
3028 hci_dev_lock(hdev);
3029
3030 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3031 goto unlock;
3032
3033 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3034 if (!conn)
3035 goto unlock;
3036
3037 loc_mitm = (conn->auth_type & 0x01);
3038 rem_mitm = (conn->remote_auth & 0x01);
3039
3040 /* If we require MITM but the remote device can't provide that
3041 * (it has NoInputNoOutput) then reject the confirmation
3042 * request. The only exception is when we're dedicated bonding
3043 * initiators (connect_cfm_cb set) since then we always have the MITM
3044 * bit set. */
3045 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3046 BT_DBG("Rejecting request: remote device can't provide MITM");
3047 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3048 sizeof(ev->bdaddr), &ev->bdaddr);
3049 goto unlock;
3050 }
3051
3052 	/* If neither side requires MITM protection, auto-accept */
3053 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3054 (!rem_mitm || conn->io_capability == 0x03)) {
3055
3056 		/* If we're not the initiator, request authorization to
3057 * proceed from user space (mgmt_user_confirm with
3058 * confirm_hint set to 1). */
3059 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3060 BT_DBG("Confirming auto-accept as acceptor");
3061 confirm_hint = 1;
3062 goto confirm;
3063 }
3064
3065 BT_DBG("Auto-accept of user confirmation with %ums delay",
3066 hdev->auto_accept_delay);
3067
3068 if (hdev->auto_accept_delay > 0) {
3069 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3070 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3071 goto unlock;
3072 }
3073
3074 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3075 sizeof(ev->bdaddr), &ev->bdaddr);
3076 goto unlock;
3077 }
3078
3079 confirm:
3080 mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
3081 confirm_hint);
3082
3083 unlock:
3084 hci_dev_unlock(hdev);
3085 }
3086
3087 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3088 struct sk_buff *skb)
3089 {
3090 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3091
3092 BT_DBG("%s", hdev->name);
3093
3094 hci_dev_lock(hdev);
3095
3096 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3097 mgmt_user_passkey_request(hdev, &ev->bdaddr);
3098
3099 hci_dev_unlock(hdev);
3100 }
3101
3102 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3103 {
3104 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3105 struct hci_conn *conn;
3106
3107 BT_DBG("%s", hdev->name);
3108
3109 hci_dev_lock(hdev);
3110
3111 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3112 if (!conn)
3113 goto unlock;
3114
3115 	/* To avoid duplicate auth_failed events to user space we check
3116 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3117 	 * initiated the authentication. A traditional auth_complete
3118 	 * event is always produced when we are the initiator and is
3119 	 * also mapped to the mgmt_auth_failed event. */
3120 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3121 mgmt_auth_failed(hdev, &conn->dst, ev->status);
3122
3123 hci_conn_put(conn);
3124
3125 unlock:
3126 hci_dev_unlock(hdev);
3127 }
3128
3129 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3130 {
3131 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3132 struct inquiry_entry *ie;
3133
3134 BT_DBG("%s", hdev->name);
3135
3136 hci_dev_lock(hdev);
3137
3138 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3139 if (ie)
3140 ie->data.ssp_mode = (ev->features[0] & 0x01);
3141
3142 hci_dev_unlock(hdev);
3143 }
3144
3145 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3146 struct sk_buff *skb)
3147 {
3148 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3149 struct oob_data *data;
3150
3151 BT_DBG("%s", hdev->name);
3152
3153 hci_dev_lock(hdev);
3154
3155 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3156 goto unlock;
3157
3158 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3159 if (data) {
3160 struct hci_cp_remote_oob_data_reply cp;
3161
3162 bacpy(&cp.bdaddr, &ev->bdaddr);
3163 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3164 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3165
3166 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3167 &cp);
3168 } else {
3169 struct hci_cp_remote_oob_data_neg_reply cp;
3170
3171 bacpy(&cp.bdaddr, &ev->bdaddr);
3172 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3173 &cp);
3174 }
3175
3176 unlock:
3177 hci_dev_unlock(hdev);
3178 }
3179
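/* LE Connection Complete event: find or create the LE connection
 * object. Failures are reported to mgmt and the connection is
 * deleted; on success the handle is recorded, the link is marked
 * connected and mgmt is told about the new device connection. */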
3180 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3181 {
3182 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3183 struct hci_conn *conn;
3184
3185 BT_DBG("%s status %d", hdev->name, ev->status);
3186
3187 hci_dev_lock(hdev);
3188
3189 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3190 if (!conn) {
3191 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3192 if (!conn) {
3193 BT_ERR("No memory for new connection");
3194 hci_dev_unlock(hdev);
3195 return;
3196 }
3197
3198 conn->dst_type = ev->bdaddr_type;
3199 }
3200
3201 if (ev->status) {
3202 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3203 conn->dst_type, ev->status);
3204 hci_proto_connect_cfm(conn, ev->status);
3205 conn->state = BT_CLOSED;
3206 hci_conn_del(conn);
3207 goto unlock;
3208 }
3209
3210 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3211 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3212 conn->dst_type, NULL, 0, 0);
3213
3214 conn->sec_level = BT_SECURITY_LOW;
3215 conn->handle = __le16_to_cpu(ev->handle);
3216 conn->state = BT_CONNECTED;
3217
3218 hci_conn_hold_device(conn);
3219 hci_conn_add_sysfs(conn);
3220
3221 hci_proto_connect_cfm(conn, ev->status);
3222
3223 unlock:
3224 hci_dev_unlock(hdev);
3225 }
3226
3227 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3228 struct sk_buff *skb)
3229 {
3230 u8 num_reports = skb->data[0];
3231 void *ptr = &skb->data[1];
3232 s8 rssi;
3233
3234 hci_dev_lock(hdev);
3235
3236 while (num_reports--) {
3237 struct hci_ev_le_advertising_info *ev = ptr;
3238
3239 hci_add_adv_entry(hdev, ev);
3240
3241 rssi = ev->data[ev->length];
3242 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3243 NULL, rssi, 0, ev->data, ev->length);
3244
3245 ptr += sizeof(*ev) + ev->length + 1;
3246 }
3247
3248 hci_dev_unlock(hdev);
3249 }
3250
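/* LE Long Term Key Request event: look up a stored LTK by EDIV and
 * Rand for the requesting connection and reply with it, falling back
 * to a negative reply when no key is known. */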
3251 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3252 struct sk_buff *skb)
3253 {
3254 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3255 struct hci_cp_le_ltk_reply cp;
3256 struct hci_cp_le_ltk_neg_reply neg;
3257 struct hci_conn *conn;
3258 struct link_key *ltk;
3259
3260 	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3261
3262 hci_dev_lock(hdev);
3263
3264 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3265 if (conn == NULL)
3266 goto not_found;
3267
3268 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3269 if (ltk == NULL)
3270 goto not_found;
3271
3272 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3273 cp.handle = cpu_to_le16(conn->handle);
3274 conn->pin_length = ltk->pin_len;
3275
3276 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3277
3278 hci_dev_unlock(hdev);
3279
3280 return;
3281
3282 not_found:
3283 neg.handle = ev->handle;
3284 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3285 hci_dev_unlock(hdev);
3286 }
3287
3288 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3289 {
3290 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3291
3292 skb_pull(skb, sizeof(*le_ev));
3293
3294 switch (le_ev->subevent) {
3295 case HCI_EV_LE_CONN_COMPLETE:
3296 hci_le_conn_complete_evt(hdev, skb);
3297 break;
3298
3299 case HCI_EV_LE_ADVERTISING_REPORT:
3300 hci_le_adv_report_evt(hdev, skb);
3301 break;
3302
3303 case HCI_EV_LE_LTK_REQ:
3304 hci_le_ltk_request_evt(hdev, skb);
3305 break;
3306
3307 default:
3308 break;
3309 }
3310 }
3311
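/* Main HCI event demultiplexer: strip the event header, dispatch on
 * the event code to the handlers above, then free the skb and update
 * the RX event statistics. */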
3312 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3313 {
3314 struct hci_event_hdr *hdr = (void *) skb->data;
3315 __u8 event = hdr->evt;
3316
3317 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3318
3319 switch (event) {
3320 case HCI_EV_INQUIRY_COMPLETE:
3321 hci_inquiry_complete_evt(hdev, skb);
3322 break;
3323
3324 case HCI_EV_INQUIRY_RESULT:
3325 hci_inquiry_result_evt(hdev, skb);
3326 break;
3327
3328 case HCI_EV_CONN_COMPLETE:
3329 hci_conn_complete_evt(hdev, skb);
3330 break;
3331
3332 case HCI_EV_CONN_REQUEST:
3333 hci_conn_request_evt(hdev, skb);
3334 break;
3335
3336 case HCI_EV_DISCONN_COMPLETE:
3337 hci_disconn_complete_evt(hdev, skb);
3338 break;
3339
3340 case HCI_EV_AUTH_COMPLETE:
3341 hci_auth_complete_evt(hdev, skb);
3342 break;
3343
3344 case HCI_EV_REMOTE_NAME:
3345 hci_remote_name_evt(hdev, skb);
3346 break;
3347
3348 case HCI_EV_ENCRYPT_CHANGE:
3349 hci_encrypt_change_evt(hdev, skb);
3350 break;
3351
3352 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3353 hci_change_link_key_complete_evt(hdev, skb);
3354 break;
3355
3356 case HCI_EV_REMOTE_FEATURES:
3357 hci_remote_features_evt(hdev, skb);
3358 break;
3359
3360 case HCI_EV_REMOTE_VERSION:
3361 hci_remote_version_evt(hdev, skb);
3362 break;
3363
3364 case HCI_EV_QOS_SETUP_COMPLETE:
3365 hci_qos_setup_complete_evt(hdev, skb);
3366 break;
3367
3368 case HCI_EV_CMD_COMPLETE:
3369 hci_cmd_complete_evt(hdev, skb);
3370 break;
3371
3372 case HCI_EV_CMD_STATUS:
3373 hci_cmd_status_evt(hdev, skb);
3374 break;
3375
3376 case HCI_EV_ROLE_CHANGE:
3377 hci_role_change_evt(hdev, skb);
3378 break;
3379
3380 case HCI_EV_NUM_COMP_PKTS:
3381 hci_num_comp_pkts_evt(hdev, skb);
3382 break;
3383
3384 case HCI_EV_MODE_CHANGE:
3385 hci_mode_change_evt(hdev, skb);
3386 break;
3387
3388 case HCI_EV_PIN_CODE_REQ:
3389 hci_pin_code_request_evt(hdev, skb);
3390 break;
3391
3392 case HCI_EV_LINK_KEY_REQ:
3393 hci_link_key_request_evt(hdev, skb);
3394 break;
3395
3396 case HCI_EV_LINK_KEY_NOTIFY:
3397 hci_link_key_notify_evt(hdev, skb);
3398 break;
3399
3400 case HCI_EV_CLOCK_OFFSET:
3401 hci_clock_offset_evt(hdev, skb);
3402 break;
3403
3404 case HCI_EV_PKT_TYPE_CHANGE:
3405 hci_pkt_type_change_evt(hdev, skb);
3406 break;
3407
3408 case HCI_EV_PSCAN_REP_MODE:
3409 hci_pscan_rep_mode_evt(hdev, skb);
3410 break;
3411
3412 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3413 hci_inquiry_result_with_rssi_evt(hdev, skb);
3414 break;
3415
3416 case HCI_EV_REMOTE_EXT_FEATURES:
3417 hci_remote_ext_features_evt(hdev, skb);
3418 break;
3419
3420 case HCI_EV_SYNC_CONN_COMPLETE:
3421 hci_sync_conn_complete_evt(hdev, skb);
3422 break;
3423
3424 case HCI_EV_SYNC_CONN_CHANGED:
3425 hci_sync_conn_changed_evt(hdev, skb);
3426 break;
3427
3428 case HCI_EV_SNIFF_SUBRATE:
3429 hci_sniff_subrate_evt(hdev, skb);
3430 break;
3431
3432 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3433 hci_extended_inquiry_result_evt(hdev, skb);
3434 break;
3435
3436 case HCI_EV_IO_CAPA_REQUEST:
3437 hci_io_capa_request_evt(hdev, skb);
3438 break;
3439
3440 case HCI_EV_IO_CAPA_REPLY:
3441 hci_io_capa_reply_evt(hdev, skb);
3442 break;
3443
3444 case HCI_EV_USER_CONFIRM_REQUEST:
3445 hci_user_confirm_request_evt(hdev, skb);
3446 break;
3447
3448 case HCI_EV_USER_PASSKEY_REQUEST:
3449 hci_user_passkey_request_evt(hdev, skb);
3450 break;
3451
3452 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3453 hci_simple_pair_complete_evt(hdev, skb);
3454 break;
3455
3456 case HCI_EV_REMOTE_HOST_FEATURES:
3457 hci_remote_host_features_evt(hdev, skb);
3458 break;
3459
3460 case HCI_EV_LE_META:
3461 hci_le_meta_evt(hdev, skb);
3462 break;
3463
3464 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3465 hci_remote_oob_data_request_evt(hdev, skb);
3466 break;
3467
3468 case HCI_EV_NUM_COMP_BLOCKS:
3469 hci_num_comp_blocks_evt(hdev, skb);
3470 break;
3471
3472 default:
3473 BT_DBG("%s event 0x%x", hdev->name, event);
3474 break;
3475 }
3476
3477 kfree_skb(skb);
3478 hdev->stat.evt_rx++;
3479 }
3480
3481 /* Generate internal stack event */
3482 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3483 {
3484 struct hci_event_hdr *hdr;
3485 struct hci_ev_stack_internal *ev;
3486 struct sk_buff *skb;
3487
3488 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3489 if (!skb)
3490 return;
3491
3492 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3493 hdr->evt = HCI_EV_STACK_INTERNAL;
3494 hdr->plen = sizeof(*ev) + dlen;
3495
3496 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
3497 ev->type = type;
3498 memcpy(ev->data, data, dlen);
3499
3500 bt_cb(skb)->incoming = 1;
3501 __net_timestamp(skb);
3502
3503 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3504 skb->dev = (void *) hdev;
3505 hci_send_to_sock(hdev, skb, NULL);
3506 kfree_skb(skb);
3507 }
3508
3509 module_param(enable_le, bool, 0644);
3510 MODULE_PARM_DESC(enable_le, "Enable LE support");