Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 /* Handle HCI Event packets */
49
50 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
51 {
52 __u8 status = *((__u8 *) skb->data);
53
54 BT_DBG("%s status 0x%x", hdev->name, status);
55
56 if (status)
57 return;
58
59 clear_bit(HCI_INQUIRY, &hdev->flags);
60
61 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
62
63 hci_conn_check_pending(hdev);
64 }
65
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_INQUIRY, &hdev->flags);
76
77 hci_conn_check_pending(hdev);
78 }
79
/* Command Complete for HCI Remote Name Request Cancel.
 * Nothing is cached locally for this command; the event is only logged. */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
84
85 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
86 {
87 struct hci_rp_role_discovery *rp = (void *) skb->data;
88 struct hci_conn *conn;
89
90 BT_DBG("%s status 0x%x", hdev->name, rp->status);
91
92 if (rp->status)
93 return;
94
95 hci_dev_lock(hdev);
96
97 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
98 if (conn) {
99 if (rp->role)
100 conn->link_mode &= ~HCI_LM_MASTER;
101 else
102 conn->link_mode |= HCI_LM_MASTER;
103 }
104
105 hci_dev_unlock(hdev);
106 }
107
108 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
109 {
110 struct hci_rp_read_link_policy *rp = (void *) skb->data;
111 struct hci_conn *conn;
112
113 BT_DBG("%s status 0x%x", hdev->name, rp->status);
114
115 if (rp->status)
116 return;
117
118 hci_dev_lock(hdev);
119
120 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
121 if (conn)
122 conn->link_policy = __le16_to_cpu(rp->policy);
123
124 hci_dev_unlock(hdev);
125 }
126
/* Command Complete for HCI Write Link Policy Settings.
 * The reply only carries the handle, so the new policy value is recovered
 * from the command parameters we originally sent. */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 2-byte handle field of the command */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
150
151 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
152 {
153 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
154
155 BT_DBG("%s status 0x%x", hdev->name, rp->status);
156
157 if (rp->status)
158 return;
159
160 hdev->link_policy = __le16_to_cpu(rp->policy);
161 }
162
/* Command Complete for HCI Write Default Link Policy Settings.
 * On success the cached default policy is taken from the command we sent
 * (the policy is the first field of the command payload). The request is
 * completed regardless of status so waiters are not left hanging. */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
179
/* Command Complete for HCI Reset: just complete the pending request. */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_RESET, status);
}
188
189 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
190 {
191 __u8 status = *((__u8 *) skb->data);
192 void *sent;
193
194 BT_DBG("%s status 0x%x", hdev->name, status);
195
196 if (status)
197 return;
198
199 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
200 if (!sent)
201 return;
202
203 memcpy(hdev->dev_name, sent, 248);
204 }
205
206 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
207 {
208 struct hci_rp_read_local_name *rp = (void *) skb->data;
209
210 BT_DBG("%s status 0x%x", hdev->name, rp->status);
211
212 if (rp->status)
213 return;
214
215 memcpy(hdev->dev_name, rp->name, 248);
216 }
217
/* Command Complete for HCI Write Authentication Enable.
 * On success the HCI_AUTH device flag is synchronized with the parameter
 * we sent; the pending request is completed in every case. */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
240
/* Command Complete for HCI Write Encryption Mode.
 * On success the HCI_ENCRYPT device flag tracks the (non-zero = enabled)
 * parameter we sent; the pending request is completed in every case. */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
263
/* Command Complete for HCI Write Scan Enable.
 * Synchronizes the HCI_ISCAN/HCI_PSCAN flags with the scan mode we sent
 * and notifies the management interface, but only on actual transitions
 * (hence the test_and_clear + old_* comparison below). */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);
		int old_pscan, old_iscan;

		/* Clear both flags first, remembering their previous state */
		old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
		old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

		if (param & SCAN_INQUIRY) {
			set_bit(HCI_ISCAN, &hdev->flags);
			if (!old_iscan)
				mgmt_discoverable(hdev->id, 1);
		} else if (old_iscan)
			mgmt_discoverable(hdev->id, 0);

		if (param & SCAN_PAGE) {
			set_bit(HCI_PSCAN, &hdev->flags);
			if (!old_pscan)
				mgmt_connectable(hdev->id, 1);
		} else if (old_pscan)
			mgmt_connectable(hdev->id, 0);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
299
300 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
301 {
302 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
303
304 BT_DBG("%s status 0x%x", hdev->name, rp->status);
305
306 if (rp->status)
307 return;
308
309 memcpy(hdev->dev_class, rp->dev_class, 3);
310
311 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
312 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
313 }
314
315 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
316 {
317 __u8 status = *((__u8 *) skb->data);
318 void *sent;
319
320 BT_DBG("%s status 0x%x", hdev->name, status);
321
322 if (status)
323 return;
324
325 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
326 if (!sent)
327 return;
328
329 memcpy(hdev->dev_class, sent, 3);
330 }
331
/* Command Complete for HCI Read Voice Setting.
 * Updates the cached voice setting and, if it actually changed, informs
 * the transport driver. The notify callback runs with the TX tasklet
 * disabled so it cannot race against packet transmission. */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* No change, nothing to propagate */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify) {
		tasklet_disable(&hdev->tx_task);
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
		tasklet_enable(&hdev->tx_task);
	}
}
357
/* Command Complete for HCI Write Voice Setting.
 * The new setting is recovered from the command parameters we sent;
 * on an actual change the transport driver is notified with the TX
 * tasklet disabled (same scheme as hci_cc_read_voice_setting). */
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	/* No change, nothing to propagate */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify) {
		tasklet_disable(&hdev->tx_task);
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
		tasklet_enable(&hdev->tx_task);
	}
}
388
/* Command Complete for HCI Host Buffer Size: just complete the request. */
static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}
397
398 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
399 {
400 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
401
402 BT_DBG("%s status 0x%x", hdev->name, rp->status);
403
404 if (rp->status)
405 return;
406
407 hdev->ssp_mode = rp->mode;
408 }
409
410 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
411 {
412 __u8 status = *((__u8 *) skb->data);
413 void *sent;
414
415 BT_DBG("%s status 0x%x", hdev->name, status);
416
417 if (status)
418 return;
419
420 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
421 if (!sent)
422 return;
423
424 hdev->ssp_mode = *((__u8 *) sent);
425 }
426
/* Pick the best inquiry mode the controller supports:
 *   2 - inquiry result with extended inquiry response
 *   1 - inquiry result with RSSI
 *   0 - standard inquiry result
 * Besides the advertised LMP feature bits, a few controllers that support
 * RSSI results without advertising the feature are matched by
 * manufacturer/revision below (NOTE(review): these look like
 * vendor-specific workarounds inherited from earlier kernels). */
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (hdev->features[6] & LMP_EXT_INQ)
		return 2;

	if (hdev->features[3] & LMP_RSSI_INQ)
		return 1;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
			hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
			hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}
454
455 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
456 {
457 u8 mode;
458
459 mode = hci_get_inquiry_mode(hdev);
460
461 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
462 }
463
/* Build and send the HCI Set Event Mask, enabling only the events the
 * controller can actually generate according to its LMP version and
 * feature bits. Each bit below matches the event mask layout of the
 * Bluetooth core specification. */
static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* Events for 1.2 and newer controllers */
	if (hdev->lmp_ver > 1) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		events[4] |= 0x04; /* Inquiry Result with RSSI */

	if (hdev->features[5] & LMP_SNIFF_SUBR)
		events[5] |= 0x20; /* Sniff Subrating */

	if (hdev->features[5] & LMP_PAUSE_ENC)
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (hdev->features[6] & LMP_EXT_INQ)
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (hdev->features[6] & LMP_NO_FLUSH)
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (hdev->features[7] & LMP_LSTO)
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (hdev->features[4] & LMP_LE)
		events[7] |= 0x20; /* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
516
/* Post-version-discovery controller setup, called from
 * hci_cc_read_local_version() during HCI_INIT: configures the event
 * mask and issues the feature-dependent follow-up commands. */
static void hci_setup(struct hci_dev *hdev)
{
	hci_setup_event_mask(hdev);

	/* 1.2 and newer controllers can report their supported commands */
	if (hdev->lmp_ver > 1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Enable Simple Pairing where supported */
	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		u8 mode = 0x01;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
}
535
/* Command Complete for HCI Read Local Version Information.
 * Caches the controller's HCI/LMP versions and manufacturer, and during
 * initial device setup (HCI_INIT) kicks off the rest of the
 * feature-dependent configuration via hci_setup(). */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
					hdev->manufacturer,
					hdev->hci_ver, hdev->hci_rev);

	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);
}
558
/* Build a default link policy from the supported LMP features and write
 * it to the controller. */
static void hci_setup_link_policy(struct hci_dev *hdev)
{
	u16 link_policy = 0;

	if (hdev->features[0] & LMP_RSWITCH)
		link_policy |= HCI_LP_RSWITCH;
	if (hdev->features[0] & LMP_HOLD)
		link_policy |= HCI_LP_HOLD;
	if (hdev->features[0] & LMP_SNIFF)
		link_policy |= HCI_LP_SNIFF;
	if (hdev->features[1] & LMP_PARK)
		link_policy |= HCI_LP_PARK;

	/* Convert in place; the variable is only used as a raw little-endian
	 * buffer for the command payload from here on. */
	link_policy = cpu_to_le16(link_policy);
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					sizeof(link_policy), &link_policy);
}
576
/* Command Complete for HCI Read Local Supported Commands.
 * Caches the supported-command bitmask and, during init, writes a default
 * link policy if the controller supports that command (octet 5 bit 4 —
 * NOTE(review): presumed to map to Write Default Link Policy Settings per
 * the core spec's supported-commands table). */
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
594
/* Command Complete for HCI Read Local Supported Features.
 * Caches the 8-byte feature mask and derives the ACL packet types and
 * (e)SCO link types the stack may use with this controller. */
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	/* 3-slot EDR eSCO enables both 2 Mbps and 3 Mbps 5-slot packets */
	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
					hdev->features[0], hdev->features[1],
					hdev->features[2], hdev->features[3],
					hdev->features[4], hdev->features[5],
					hdev->features[6], hdev->features[7]);
}
649
/* Command Complete for HCI Read Buffer Size.
 * Records the controller's ACL/SCO MTUs and packet counts, which drive
 * the flow-control accounting (acl_cnt/sco_cnt start out full). */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO buffer info; override it */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
					hdev->acl_mtu, hdev->acl_pkts,
					hdev->sco_mtu, hdev->sco_pkts);
}
676
677 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
678 {
679 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
680
681 BT_DBG("%s status 0x%x", hdev->name, rp->status);
682
683 if (!rp->status)
684 bacpy(&hdev->bdaddr, &rp->bdaddr);
685
686 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
687 }
688
/* Command Complete for HCI Write CA Timeout: just complete the request. */
static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
697
/* Command Complete for HCI Delete Stored Link Key: just complete the
 * pending request. */
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}
707
/* Command Complete for HCI Set Event Mask: just complete the request. */
static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}
716
/* Command Complete for HCI Write Inquiry Mode: just complete the request. */
static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}
726
/* Command Complete for HCI Read Inquiry Response TX Power: the reply
 * payload is not cached here, only the request is completed. */
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
}
736
/* Command Complete for HCI Set Event Filter: just complete the request. */
static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}
745
/* Command Complete for HCI PIN Code Request Reply.
 * Forwards the result to the management interface and, on success,
 * remembers the PIN length on the connection (used later for link key
 * handling). The PIN length comes from the command we sent, since the
 * reply only carries the address. */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		return;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;
}
768
769 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
770 {
771 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
772
773 BT_DBG("%s status 0x%x", hdev->name, rp->status);
774
775 if (test_bit(HCI_MGMT, &hdev->flags))
776 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
777 rp->status);
778 }
/* Command Complete for HCI LE Read Buffer Size.
 * Records the LE ACL MTU and packet count used for LE flow control;
 * le_cnt starts out at the full packet budget. */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}
798
799 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
800 {
801 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
802
803 BT_DBG("%s status 0x%x", hdev->name, rp->status);
804
805 if (test_bit(HCI_MGMT, &hdev->flags))
806 mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
807 rp->status);
808 }
809
810 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
811 struct sk_buff *skb)
812 {
813 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
814
815 BT_DBG("%s status 0x%x", hdev->name, rp->status);
816
817 if (test_bit(HCI_MGMT, &hdev->flags))
818 mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
819 rp->status);
820 }
821
822 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
823 {
824 BT_DBG("%s status 0x%x", hdev->name, status);
825
826 if (status) {
827 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
828
829 hci_conn_check_pending(hdev);
830 } else
831 set_bit(HCI_INQUIRY, &hdev->flags);
832 }
833
/* Command Status for HCI Create Connection.
 * On failure: tear down the connection object unless the error is 0x0c
 * (Command Disallowed) and we still have retries left, in which case the
 * attempt is parked in BT_CONNECT2 for a later retry.
 * On success: make sure a connection object exists for the outgoing link. */
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = 1;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
873
/* Command Status for HCI Add SCO Connection.
 * Only failures need handling here: find the parent ACL link by the
 * handle we sent and tear down the SCO connection attached to it. */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
908
/* Command Status for HCI Authentication Requested.
 * On failure during connection setup (BT_CONFIG), report the failure to
 * the upper protocol and drop the reference taken for the auth attempt. */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
935
/* Command Status for HCI Set Connection Encryption.
 * On failure during connection setup (BT_CONFIG), report the failure to
 * the upper protocol and drop the connection reference. */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
962
963 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
964 struct hci_conn *conn)
965 {
966 if (conn->state != BT_CONFIG || !conn->out)
967 return 0;
968
969 if (conn->pending_sec_level == BT_SECURITY_SDP)
970 return 0;
971
972 /* Only request authentication for SSP connections or non-SSP
973 * devices with sec_level HIGH */
974 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
975 conn->pending_sec_level != BT_SECURITY_HIGH)
976 return 0;
977
978 return 1;
979 }
980
981 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
982 {
983 struct hci_cp_remote_name_req *cp;
984 struct hci_conn *conn;
985
986 BT_DBG("%s status 0x%x", hdev->name, status);
987
988 /* If successful wait for the name req complete event before
989 * checking for the need to do authentication */
990 if (!status)
991 return;
992
993 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
994 if (!cp)
995 return;
996
997 hci_dev_lock(hdev);
998
999 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1000 if (conn && hci_outgoing_auth_needed(hdev, conn)) {
1001 struct hci_cp_auth_requested cp;
1002 cp.handle = __cpu_to_le16(conn->handle);
1003 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1004 }
1005
1006 hci_dev_unlock(hdev);
1007 }
1008
/* Command Status for HCI Read Remote Supported Features.
 * On failure during connection setup (BT_CONFIG), report the failure to
 * the upper protocol and drop the connection reference. */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1035
/* Command Status for HCI Read Remote Extended Features.
 * On failure during connection setup (BT_CONFIG), report the failure to
 * the upper protocol and drop the connection reference. */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1062
/* Command Status for HCI Setup Synchronous Connection.
 * Only failures need handling: find the parent ACL link by the handle we
 * sent and tear down the (e)SCO connection attached to it. */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1097
/* Command Status for HCI Sniff Mode.
 * On failure, clear the pending mode-change flag and finish any SCO
 * setup that was waiting for the mode change. */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1124
/* Command Status for HCI Exit Sniff Mode.
 * On failure, clear the pending mode-change flag and finish any SCO
 * setup that was waiting for the mode change. */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1151
/* Command Status for HCI LE Create Connection.
 * On failure, tear down any half-open LE connection object; on success
 * make sure one exists for the outgoing link. */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
		conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
			if (conn)
				conn->out = 1;
			else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1188
1189 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1190 {
1191 __u8 status = *((__u8 *) skb->data);
1192
1193 BT_DBG("%s status %d", hdev->name, status);
1194
1195 clear_bit(HCI_INQUIRY, &hdev->flags);
1196
1197 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1198
1199 hci_conn_check_pending(hdev);
1200 }
1201
/* HCI Inquiry Result event: the packet carries a response count followed
 * by that many inquiry_info records; feed each one into the inquiry
 * cache. Standard results have no RSSI or SSP information, so those
 * fields are zeroed. */
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--) {
		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= 0x00;
		data.ssp_mode		= 0x00;
		info++;
		hci_inquiry_cache_update(hdev, &data);
	}

	hci_dev_unlock(hdev);
}
1230
/* Connection Complete: an incoming or outgoing BR/EDR connection
 * attempt finished.  On success the connection object is bound to its
 * handle and follow-up setup commands are issued; on failure the
 * connection is torn down and upper layers notified. */
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may answer a request we tracked as
		 * eSCO: retry the lookup and downgrade the link type. */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			/* ACL links stay in BT_CONFIG until remote
			 * features / authentication are resolved. */
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			mgmt_connected(hdev->id, &ev->bdaddr);
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		/* Mirror adapter-wide auth/encrypt settings on the link. */
		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < 3) {
			/* Older controllers (hci_ver < 3) need an explicit
			 * packet-type change for accepted connections. */
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
							sizeof(cp), &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
	}

	/* Kick any SCO channel that was waiting on this ACL link. */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1308
/* Connection Request: a remote device wants to connect to us.  Accept
 * (plain or synchronous, possibly requesting a role switch) when link
 * policy and the protocol layers allow it, otherwise reject. */
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
					batostr(&ev->bdaddr), ev->link_type);

	/* Let protocol layers extend the link-mode mask (e.g. accept). */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class for this peer. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			/* Plain accept: ACL link, or SCO on a controller
			 * without eSCO support. */
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
							sizeof(cp), &cp);
		} else {
			/* eSCO-capable: accept with explicit synchronous
			 * connection parameters. */
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = cpu_to_le32(0x00001f40);
			cp.max_latency = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* 0x0f is an HCI rejection reason code — presumably
		 * "rejected due to unacceptable BD_ADDR"; verify against
		 * the Core Specification error code table. */
		cp.reason = 0x0f;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1382
/* Disconnection Complete: a link went down, or a requested disconnect
 * failed (reported to mgmt). */
static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	if (ev->status) {
		mgmt_disconnect_failed(hdev->id);
		return;
	}

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->state = BT_CLOSED;

	if (conn->type == ACL_LINK)
		mgmt_disconnected(hdev->id, &conn->dst);

	/* Tell upper layers why the link closed, then free it. */
	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
1412
/* Authentication Complete: update link security state and drive the
 * next setup step (encryption, or connection confirmation). */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		} else {
			mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
			conn->sec_level = BT_SECURITY_LOW;
		}

		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);

		if (conn->state == BT_CONFIG) {
			/* With SSP enabled on both sides, successful auth
			 * is followed by switching on encryption. */
			if (!ev->status && hdev->ssp_mode > 0 &&
					conn->ssp_mode > 0) {
				struct hci_cp_set_conn_encrypt cp;
				cp.handle = ev->handle;
				cp.encrypt = 0x01;
				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
			} else {
				conn->state = BT_CONNECTED;
				hci_proto_connect_cfm(conn, ev->status);
				hci_conn_put(conn);
			}
		} else {
			hci_auth_cfm(conn, ev->status);

			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			hci_conn_put(conn);
		}

		/* An encryption request was queued behind this auth:
		 * issue it now, or fail it if auth itself failed. */
		if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
			if (!ev->status) {
				struct hci_cp_set_conn_encrypt cp;
				cp.handle = ev->handle;
				cp.encrypt = 0x01;
				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
			} else {
				clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
				hci_encrypt_cfm(conn, ev->status, 0x00);
			}
		}
	}

	hci_dev_unlock(hdev);
}
1471
/* Remote Name Request Complete: used as a trigger point — retry
 * pending connects and, when an outgoing connection still needs
 * authentication, request it now. */
static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn && hci_outgoing_auth_needed(hdev, conn)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

	hci_dev_unlock(hdev);
}
1492
/* Encryption Change: link encryption was switched on or off. */
static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);

		if (conn->state == BT_CONFIG) {
			/* Encryption was the last setup step: on success
			 * the link is now fully established. */
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

	hci_dev_unlock(hdev);
}
1527
1528 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1529 {
1530 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1531 struct hci_conn *conn;
1532
1533 BT_DBG("%s status %d", hdev->name, ev->status);
1534
1535 hci_dev_lock(hdev);
1536
1537 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1538 if (conn) {
1539 if (!ev->status)
1540 conn->link_mode |= HCI_LM_SECURE;
1541
1542 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1543
1544 hci_key_change_cfm(conn, ev->status);
1545 }
1546
1547 hci_dev_unlock(hdev);
1548 }
1549
/* Read Remote Supported Features Complete: continue ACL link setup.
 * Depending on SSP support, either fetch the extended features page or
 * proceed to name resolution / connection confirmation. */
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP-capable: read extended features page 1; the
	 * remote-ext-features handler will continue the setup. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	/* No (further) authentication required: setup is done. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1595
/* Read Remote Version Information Complete: currently only logged;
 * the version data is not stored. */
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
1600
/* QoS Setup Complete: currently only logged; QoS parameters are not
 * tracked by the host. */
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
1605
1606 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1607 {
1608 struct hci_ev_cmd_complete *ev = (void *) skb->data;
1609 __u16 opcode;
1610
1611 skb_pull(skb, sizeof(*ev));
1612
1613 opcode = __le16_to_cpu(ev->opcode);
1614
1615 switch (opcode) {
1616 case HCI_OP_INQUIRY_CANCEL:
1617 hci_cc_inquiry_cancel(hdev, skb);
1618 break;
1619
1620 case HCI_OP_EXIT_PERIODIC_INQ:
1621 hci_cc_exit_periodic_inq(hdev, skb);
1622 break;
1623
1624 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1625 hci_cc_remote_name_req_cancel(hdev, skb);
1626 break;
1627
1628 case HCI_OP_ROLE_DISCOVERY:
1629 hci_cc_role_discovery(hdev, skb);
1630 break;
1631
1632 case HCI_OP_READ_LINK_POLICY:
1633 hci_cc_read_link_policy(hdev, skb);
1634 break;
1635
1636 case HCI_OP_WRITE_LINK_POLICY:
1637 hci_cc_write_link_policy(hdev, skb);
1638 break;
1639
1640 case HCI_OP_READ_DEF_LINK_POLICY:
1641 hci_cc_read_def_link_policy(hdev, skb);
1642 break;
1643
1644 case HCI_OP_WRITE_DEF_LINK_POLICY:
1645 hci_cc_write_def_link_policy(hdev, skb);
1646 break;
1647
1648 case HCI_OP_RESET:
1649 hci_cc_reset(hdev, skb);
1650 break;
1651
1652 case HCI_OP_WRITE_LOCAL_NAME:
1653 hci_cc_write_local_name(hdev, skb);
1654 break;
1655
1656 case HCI_OP_READ_LOCAL_NAME:
1657 hci_cc_read_local_name(hdev, skb);
1658 break;
1659
1660 case HCI_OP_WRITE_AUTH_ENABLE:
1661 hci_cc_write_auth_enable(hdev, skb);
1662 break;
1663
1664 case HCI_OP_WRITE_ENCRYPT_MODE:
1665 hci_cc_write_encrypt_mode(hdev, skb);
1666 break;
1667
1668 case HCI_OP_WRITE_SCAN_ENABLE:
1669 hci_cc_write_scan_enable(hdev, skb);
1670 break;
1671
1672 case HCI_OP_READ_CLASS_OF_DEV:
1673 hci_cc_read_class_of_dev(hdev, skb);
1674 break;
1675
1676 case HCI_OP_WRITE_CLASS_OF_DEV:
1677 hci_cc_write_class_of_dev(hdev, skb);
1678 break;
1679
1680 case HCI_OP_READ_VOICE_SETTING:
1681 hci_cc_read_voice_setting(hdev, skb);
1682 break;
1683
1684 case HCI_OP_WRITE_VOICE_SETTING:
1685 hci_cc_write_voice_setting(hdev, skb);
1686 break;
1687
1688 case HCI_OP_HOST_BUFFER_SIZE:
1689 hci_cc_host_buffer_size(hdev, skb);
1690 break;
1691
1692 case HCI_OP_READ_SSP_MODE:
1693 hci_cc_read_ssp_mode(hdev, skb);
1694 break;
1695
1696 case HCI_OP_WRITE_SSP_MODE:
1697 hci_cc_write_ssp_mode(hdev, skb);
1698 break;
1699
1700 case HCI_OP_READ_LOCAL_VERSION:
1701 hci_cc_read_local_version(hdev, skb);
1702 break;
1703
1704 case HCI_OP_READ_LOCAL_COMMANDS:
1705 hci_cc_read_local_commands(hdev, skb);
1706 break;
1707
1708 case HCI_OP_READ_LOCAL_FEATURES:
1709 hci_cc_read_local_features(hdev, skb);
1710 break;
1711
1712 case HCI_OP_READ_BUFFER_SIZE:
1713 hci_cc_read_buffer_size(hdev, skb);
1714 break;
1715
1716 case HCI_OP_READ_BD_ADDR:
1717 hci_cc_read_bd_addr(hdev, skb);
1718 break;
1719
1720 case HCI_OP_WRITE_CA_TIMEOUT:
1721 hci_cc_write_ca_timeout(hdev, skb);
1722 break;
1723
1724 case HCI_OP_DELETE_STORED_LINK_KEY:
1725 hci_cc_delete_stored_link_key(hdev, skb);
1726 break;
1727
1728 case HCI_OP_SET_EVENT_MASK:
1729 hci_cc_set_event_mask(hdev, skb);
1730 break;
1731
1732 case HCI_OP_WRITE_INQUIRY_MODE:
1733 hci_cc_write_inquiry_mode(hdev, skb);
1734 break;
1735
1736 case HCI_OP_READ_INQ_RSP_TX_POWER:
1737 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1738 break;
1739
1740 case HCI_OP_SET_EVENT_FLT:
1741 hci_cc_set_event_flt(hdev, skb);
1742 break;
1743
1744 case HCI_OP_PIN_CODE_REPLY:
1745 hci_cc_pin_code_reply(hdev, skb);
1746 break;
1747
1748 case HCI_OP_PIN_CODE_NEG_REPLY:
1749 hci_cc_pin_code_neg_reply(hdev, skb);
1750 break;
1751
1752 case HCI_OP_LE_READ_BUFFER_SIZE:
1753 hci_cc_le_read_buffer_size(hdev, skb);
1754 break;
1755
1756 case HCI_OP_USER_CONFIRM_REPLY:
1757 hci_cc_user_confirm_reply(hdev, skb);
1758 break;
1759
1760 case HCI_OP_USER_CONFIRM_NEG_REPLY:
1761 hci_cc_user_confirm_neg_reply(hdev, skb);
1762 break;
1763
1764 default:
1765 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1766 break;
1767 }
1768
1769 if (ev->opcode != HCI_OP_NOP)
1770 del_timer(&hdev->cmd_timer);
1771
1772 if (ev->ncmd) {
1773 atomic_set(&hdev->cmd_cnt, 1);
1774 if (!skb_queue_empty(&hdev->cmd_q))
1775 tasklet_schedule(&hdev->cmd_task);
1776 }
1777 }
1778
1779 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1780 {
1781 struct hci_ev_cmd_status *ev = (void *) skb->data;
1782 __u16 opcode;
1783
1784 skb_pull(skb, sizeof(*ev));
1785
1786 opcode = __le16_to_cpu(ev->opcode);
1787
1788 switch (opcode) {
1789 case HCI_OP_INQUIRY:
1790 hci_cs_inquiry(hdev, ev->status);
1791 break;
1792
1793 case HCI_OP_CREATE_CONN:
1794 hci_cs_create_conn(hdev, ev->status);
1795 break;
1796
1797 case HCI_OP_ADD_SCO:
1798 hci_cs_add_sco(hdev, ev->status);
1799 break;
1800
1801 case HCI_OP_AUTH_REQUESTED:
1802 hci_cs_auth_requested(hdev, ev->status);
1803 break;
1804
1805 case HCI_OP_SET_CONN_ENCRYPT:
1806 hci_cs_set_conn_encrypt(hdev, ev->status);
1807 break;
1808
1809 case HCI_OP_REMOTE_NAME_REQ:
1810 hci_cs_remote_name_req(hdev, ev->status);
1811 break;
1812
1813 case HCI_OP_READ_REMOTE_FEATURES:
1814 hci_cs_read_remote_features(hdev, ev->status);
1815 break;
1816
1817 case HCI_OP_READ_REMOTE_EXT_FEATURES:
1818 hci_cs_read_remote_ext_features(hdev, ev->status);
1819 break;
1820
1821 case HCI_OP_SETUP_SYNC_CONN:
1822 hci_cs_setup_sync_conn(hdev, ev->status);
1823 break;
1824
1825 case HCI_OP_SNIFF_MODE:
1826 hci_cs_sniff_mode(hdev, ev->status);
1827 break;
1828
1829 case HCI_OP_EXIT_SNIFF_MODE:
1830 hci_cs_exit_sniff_mode(hdev, ev->status);
1831 break;
1832
1833 case HCI_OP_DISCONNECT:
1834 if (ev->status != 0)
1835 mgmt_disconnect_failed(hdev->id);
1836 break;
1837
1838 case HCI_OP_LE_CREATE_CONN:
1839 hci_cs_le_create_conn(hdev, ev->status);
1840 break;
1841
1842 default:
1843 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1844 break;
1845 }
1846
1847 if (ev->opcode != HCI_OP_NOP)
1848 del_timer(&hdev->cmd_timer);
1849
1850 if (ev->ncmd) {
1851 atomic_set(&hdev->cmd_cnt, 1);
1852 if (!skb_queue_empty(&hdev->cmd_q))
1853 tasklet_schedule(&hdev->cmd_task);
1854 }
1855 }
1856
1857 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1858 {
1859 struct hci_ev_role_change *ev = (void *) skb->data;
1860 struct hci_conn *conn;
1861
1862 BT_DBG("%s status %d", hdev->name, ev->status);
1863
1864 hci_dev_lock(hdev);
1865
1866 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1867 if (conn) {
1868 if (!ev->status) {
1869 if (ev->role)
1870 conn->link_mode &= ~HCI_LM_MASTER;
1871 else
1872 conn->link_mode |= HCI_LM_MASTER;
1873 }
1874
1875 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
1876
1877 hci_role_switch_cfm(conn, ev->status, ev->role);
1878 }
1879
1880 hci_dev_unlock(hdev);
1881 }
1882
/* Number of Completed Packets: controller flow-control feedback.  For
 * each handle, credit the completed count back to the matching
 * per-link-type packet budget so the TX tasklet can send more data. */
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	__le16 *ptr;
	int i;

	skb_pull(skb, sizeof(*ev));

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	/* Each entry is two little-endian 16-bit fields: handle, count. */
	if (skb->len < ev->num_hndl * 4) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	/* Keep the TX tasklet from racing with the counter updates. */
	tasklet_disable(&hdev->tx_task);

	for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
		struct hci_conn *conn;
		__u16 handle, count;

		handle = get_unaligned_le16(ptr++);
		count = get_unaligned_le16(ptr++);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->sent -= count;

			if (conn->type == ACL_LINK) {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			} else if (conn->type == LE_LINK) {
				/* LE traffic shares the ACL budget when
				 * the controller has no dedicated LE
				 * buffers (le_pkts == 0). */
				if (hdev->le_pkts) {
					hdev->le_cnt += count;
					if (hdev->le_cnt > hdev->le_pkts)
						hdev->le_cnt = hdev->le_pkts;
				} else {
					hdev->acl_cnt += count;
					if (hdev->acl_cnt > hdev->acl_pkts)
						hdev->acl_cnt = hdev->acl_pkts;
				}
			} else {
				hdev->sco_cnt += count;
				if (hdev->sco_cnt > hdev->sco_pkts)
					hdev->sco_cnt = hdev->sco_pkts;
			}
		}
	}

	tasklet_schedule(&hdev->tx_task);

	tasklet_enable(&hdev->tx_task);
}
1937
/* Mode Change: a connection entered or left sniff/hold/active mode. */
static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		/* No local request pending: the change was not ours, so
		 * track the implied power-save preference. */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
			if (conn->mode == HCI_CM_ACTIVE)
				conn->power_save = 1;
			else
				conn->power_save = 0;
		}

		/* A SCO setup may have been deferred until this mode
		 * change completed. */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
1965
/* PIN Code Request: legacy pairing asks for a PIN.  Auto-reject when
 * the adapter is not pairable, otherwise forward to user space via the
 * management interface. */
static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn && conn->state == BT_CONNECTED) {
		/* Stretch the idle-disconnect timeout while pairing. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_pin_code_request(hdev->id, &ev->bdaddr);

	hci_dev_unlock(hdev);
}
1991
/* Link Key Request: the controller asks whether we have a stored link
 * key for this peer.  Reply with the key, or send a negative reply
 * when none exists or policy forbids using it. */
static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only consulted when the host manages keys. */
	if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %s", hdev->name,
							batostr(&ev->bdaddr));
		goto not_found;
	}

	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
							batostr(&ev->bdaddr));

	/* Type 0x03 is a debug key: usable only in debug-keys mode. */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Don't satisfy a MITM-protected authentication request
	 * (auth_type bit 0) with an unauthenticated (0x04) key. */
	if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
				(conn->auth_type & 0x01)) {
		BT_DBG("%s ignoring unauthenticated key", hdev->name);
		goto not_found;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, 16);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2042
2043 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2044 {
2045 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2046 struct hci_conn *conn;
2047 u8 pin_len = 0;
2048
2049 BT_DBG("%s", hdev->name);
2050
2051 hci_dev_lock(hdev);
2052
2053 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2054 if (conn) {
2055 hci_conn_hold(conn);
2056 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2057 pin_len = conn->pin_length;
2058 hci_conn_put(conn);
2059 }
2060
2061 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2062 hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
2063 ev->key_type, pin_len);
2064
2065 hci_dev_unlock(hdev);
2066 }
2067
2068 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2069 {
2070 struct hci_ev_clock_offset *ev = (void *) skb->data;
2071 struct hci_conn *conn;
2072
2073 BT_DBG("%s status %d", hdev->name, ev->status);
2074
2075 hci_dev_lock(hdev);
2076
2077 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2078 if (conn && !ev->status) {
2079 struct inquiry_entry *ie;
2080
2081 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2082 if (ie) {
2083 ie->data.clock_offset = ev->clock_offset;
2084 ie->timestamp = jiffies;
2085 }
2086 }
2087
2088 hci_dev_unlock(hdev);
2089 }
2090
2091 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2092 {
2093 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2094 struct hci_conn *conn;
2095
2096 BT_DBG("%s status %d", hdev->name, ev->status);
2097
2098 hci_dev_lock(hdev);
2099
2100 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2101 if (conn && !ev->status)
2102 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2103
2104 hci_dev_unlock(hdev);
2105 }
2106
2107 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2108 {
2109 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2110 struct inquiry_entry *ie;
2111
2112 BT_DBG("%s", hdev->name);
2113
2114 hci_dev_lock(hdev);
2115
2116 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2117 if (ie) {
2118 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2119 ie->timestamp = jiffies;
2120 }
2121
2122 hci_dev_unlock(hdev);
2123 }
2124
2125 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2126 {
2127 struct inquiry_data data;
2128 int num_rsp = *((__u8 *) skb->data);
2129
2130 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2131
2132 if (!num_rsp)
2133 return;
2134
2135 hci_dev_lock(hdev);
2136
2137 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2138 struct inquiry_info_with_rssi_and_pscan_mode *info;
2139 info = (void *) (skb->data + 1);
2140
2141 for (; num_rsp; num_rsp--) {
2142 bacpy(&data.bdaddr, &info->bdaddr);
2143 data.pscan_rep_mode = info->pscan_rep_mode;
2144 data.pscan_period_mode = info->pscan_period_mode;
2145 data.pscan_mode = info->pscan_mode;
2146 memcpy(data.dev_class, info->dev_class, 3);
2147 data.clock_offset = info->clock_offset;
2148 data.rssi = info->rssi;
2149 data.ssp_mode = 0x00;
2150 info++;
2151 hci_inquiry_cache_update(hdev, &data);
2152 }
2153 } else {
2154 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2155
2156 for (; num_rsp; num_rsp--) {
2157 bacpy(&data.bdaddr, &info->bdaddr);
2158 data.pscan_rep_mode = info->pscan_rep_mode;
2159 data.pscan_period_mode = info->pscan_period_mode;
2160 data.pscan_mode = 0x00;
2161 memcpy(data.dev_class, info->dev_class, 3);
2162 data.clock_offset = info->clock_offset;
2163 data.rssi = info->rssi;
2164 data.ssp_mode = 0x00;
2165 info++;
2166 hci_inquiry_cache_update(hdev, &data);
2167 }
2168 }
2169
2170 hci_dev_unlock(hdev);
2171 }
2172
/* Read Remote Extended Features Complete: page 0x01 carries the remote
 * host's SSP support bit; store it, then continue link setup the same
 * way hci_remote_features_evt does for non-SSP peers. */
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		/* Mirror the SSP bit into the inquiry cache as well. */
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & 0x01);

		conn->ssp_mode = (ev->features[0] & 0x01);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	/* No authentication needed: the connection is established. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2216
/* Synchronous Connection Complete: SCO/eSCO setup finished.  For a few
 * parameter-negotiation failures a single retry with a reduced
 * packet-type mask is attempted before giving up. */
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO request may have completed as plain SCO. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry once without the packet types the remote side
		 * rejected; only for locally initiated connections. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2271
/* Synchronous Connection Changed: currently only logged. */
static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2276
/* Sniff Subrating: currently only logged; the negotiated subrate
 * parameters are not stored. */
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}
2283
2284 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2285 {
2286 struct inquiry_data data;
2287 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2288 int num_rsp = *((__u8 *) skb->data);
2289
2290 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2291
2292 if (!num_rsp)
2293 return;
2294
2295 hci_dev_lock(hdev);
2296
2297 for (; num_rsp; num_rsp--) {
2298 bacpy(&data.bdaddr, &info->bdaddr);
2299 data.pscan_rep_mode = info->pscan_rep_mode;
2300 data.pscan_period_mode = info->pscan_period_mode;
2301 data.pscan_mode = 0x00;
2302 memcpy(data.dev_class, info->dev_class, 3);
2303 data.clock_offset = info->clock_offset;
2304 data.rssi = info->rssi;
2305 data.ssp_mode = 0x01;
2306 info++;
2307 hci_inquiry_cache_update(hdev, &data);
2308 }
2309
2310 hci_dev_unlock(hdev);
2311 }
2312
2313 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2314 {
2315 /* If remote requests dedicated bonding follow that lead */
2316 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2317 /* If both remote and local IO capabilities allow MITM
2318 * protection then require it, otherwise don't */
2319 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2320 return 0x02;
2321 else
2322 return 0x03;
2323 }
2324
2325 /* If remote requests no-bonding follow that lead */
2326 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2327 return 0x00;
2328
2329 return conn->auth_type;
2330 }
2331
/* IO Capability Request: the controller needs our IO capabilities for
 * SSP pairing.  Reply when pairing is allowed, otherwise negatively. */
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of pairing. */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	/* Pairing is allowed when the adapter is pairable, or when the
	 * remote only asks for no-bonding (MITM bit masked off). */
	if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.capability = conn->io_capability;
		cp.oob_data = 0;
		cp.authentication = hci_get_auth_req(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
							sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x16; /* Pairing not allowed */

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
							sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2374
2375 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2376 {
2377 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2378 struct hci_conn *conn;
2379
2380 BT_DBG("%s", hdev->name);
2381
2382 hci_dev_lock(hdev);
2383
2384 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2385 if (!conn)
2386 goto unlock;
2387
2388 hci_conn_hold(conn);
2389
2390 conn->remote_cap = ev->capability;
2391 conn->remote_oob = ev->oob_data;
2392 conn->remote_auth = ev->authentication;
2393
2394 unlock:
2395 hci_dev_unlock(hdev);
2396 }
2397
/* User Confirmation Request: forward the SSP numeric-comparison
 * request to user space when the management interface is in use. */
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);

	hci_dev_unlock(hdev);
}
2412
/* Simple Pairing Complete event: end of the Secure Simple Pairing
 * procedure. Report failures to userspace (unless a regular auth
 * complete event will do so) and drop the connection reference taken
 * when pairing started.
 */
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);

	/* Balances the hci_conn_hold() taken when pairing started
	 * (IO Capability Request/Response handlers). */
	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
2439
2440 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2441 {
2442 struct hci_ev_remote_host_features *ev = (void *) skb->data;
2443 struct inquiry_entry *ie;
2444
2445 BT_DBG("%s", hdev->name);
2446
2447 hci_dev_lock(hdev);
2448
2449 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2450 if (ie)
2451 ie->data.ssp_mode = (ev->features[0] & 0x01);
2452
2453 hci_dev_unlock(hdev);
2454 }
2455
/* LE Connection Complete event: a Low Energy link finished its
 * connection attempt. On success register the connection; on failure
 * notify the upper protocol and tear the connection object down.
 */
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An incoming connection may have no pre-existing hci_conn
	 * object, so create one on demand. */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	if (ev->status) {
		/* Inform the protocol layer of the failure before the
		 * connection object is freed. */
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
2493
2494 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495 {
2496 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2497
2498 skb_pull(skb, sizeof(*le_ev));
2499
2500 switch (le_ev->subevent) {
2501 case HCI_EV_LE_CONN_COMPLETE:
2502 hci_le_conn_complete_evt(hdev, skb);
2503 break;
2504
2505 default:
2506 break;
2507 }
2508 }
2509
/* Main HCI event demultiplexer: strip the event header and route the
 * packet to the matching handler. Consumes the skb (always frees it)
 * and bumps the device's received-event counter.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	/* Handlers expect skb->data to point at the event parameters */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	default:
		/* Unknown/unhandled events are logged and dropped */
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
2666
2667 /* Generate internal stack event */
/* Generate internal stack event */
/* Builds a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes
 * of @data of the given @type, marks it as an incoming timestamped
 * event packet, and delivers it to listening HCI sockets. Allocation
 * failure is silently ignored (best-effort notification).
 */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	/* GFP_ATOMIC: may be called from non-sleeping context —
	 * presumably interrupt/softirq paths; TODO confirm callers. */
	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Standard HCI event header wrapping the internal payload */
	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present it like a received packet so socket filters and
	 * monitors treat it uniformly. */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb, NULL);
	kfree_skb(skb);
}