/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	if (test_bit(HCI_MGMT, &hdev->flags) &&
			test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		mgmt_discovering(hdev->id, 0);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	hci_conn_check_pending(hdev);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	if (test_bit(HCI_MGMT, &hdev->flags) &&
			test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		mgmt_discovering(hdev->id, 0);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_set_local_name_complete(hdev->id, sent, status);

	if (status)
		return;

	memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);
		int old_pscan, old_iscan;

		old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
		old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

		if (param & SCAN_INQUIRY) {
			set_bit(HCI_ISCAN, &hdev->flags);
			if (!old_iscan)
				mgmt_discoverable(hdev->id, 1);
		} else if (old_iscan)
			mgmt_discoverable(hdev->id, 0);

		if (param & SCAN_PAGE) {
			set_bit(HCI_PSCAN, &hdev->flags);
			if (!old_pscan)
				mgmt_connectable(hdev->id, 1);
		} else if (old_pscan)
			mgmt_connectable(hdev->id, 0);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	memcpy(hdev->dev_class, sent, 3);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify) {
		tasklet_disable(&hdev->tx_task);
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
		tasklet_enable(&hdev->tx_task);
	}
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify) {
		tasklet_disable(&hdev->tx_task);
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
		tasklet_enable(&hdev->tx_task);
	}
}

static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}

static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_ssp_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->ssp_mode = rp->mode;
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hdev->ssp_mode = *((__u8 *) sent);
}

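/* Pick the inquiry result format the controller should use: 2 for extended
 * inquiry results, 1 for results with RSSI, 0 for the standard format.  A
 * few controllers that support RSSI results without advertising the feature
 * bit are matched by manufacturer/revision below. */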
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (hdev->features[6] & LMP_EXT_INQ)
		return 2;

	if (hdev->features[3] & LMP_RSSI_INQ)
		return 1;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
						hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
						hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}

static void hci_setup_inquiry_mode(struct hci_dev *hdev)
{
	u8 mode;

	mode = hci_get_inquiry_mode(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

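/* Unmask only the HCI events the controller can actually generate, based on
 * the LMP version and feature bits read during init. */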
static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* Events for 1.2 and newer controllers */
	if (hdev->lmp_ver > 1) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		events[4] |= 0x04; /* Inquiry Result with RSSI */

	if (hdev->features[5] & LMP_SNIFF_SUBR)
		events[5] |= 0x20; /* Sniff Subrating */

	if (hdev->features[5] & LMP_PAUSE_ENC)
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (hdev->features[6] & LMP_EXT_INQ)
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (hdev->features[6] & LMP_NO_FLUSH)
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (hdev->features[7] & LMP_LSTO)
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (hdev->features[4] & LMP_LE)
		events[7] |= 0x20; /* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_setup(struct hci_dev *hdev)
{
	hci_setup_event_mask(hdev);

	if (hdev->lmp_ver > 1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		u8 mode = 0x01;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
					hdev->manufacturer,
					hdev->hci_ver, hdev->hci_rev);

	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);
}

static void hci_setup_link_policy(struct hci_dev *hdev)
{
	u16 link_policy = 0;

	if (hdev->features[0] & LMP_RSWITCH)
		link_policy |= HCI_LP_RSWITCH;
	if (hdev->features[0] & LMP_HOLD)
		link_policy |= HCI_LP_HOLD;
	if (hdev->features[0] & LMP_SNIFF)
		link_policy |= HCI_LP_SNIFF;
	if (hdev->features[1] & LMP_PARK)
		link_policy |= HCI_LP_PARK;

	link_policy = cpu_to_le16(link_policy);
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					sizeof(link_policy), &link_policy);
}

static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}

static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
					hdev->features[0], hdev->features[1],
					hdev->features[2], hdev->features[3],
					hdev->features[4], hdev->features[5],
					hdev->features[6], hdev->features[7]);
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
					hdev->acl_mtu, hdev->acl_pkts,
					hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (!rp->status)
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}

static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}

static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}

static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
}

static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		return;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
								rp->status);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
								rp->status);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
								rp->status);
}

static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
						rp->randomizer, rp->status);
}

static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->flags) &&
			!test_and_set_bit(HCI_INQUIRY, &hdev->flags))
		mgmt_discovering(hdev->id, 1);
}

static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = 1;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
							struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level HIGH */
	if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
			conn->pending_sec_level != BT_SECURITY_HIGH)
		return 0;

	return 1;
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
		conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
			if (conn)
				conn->out = 1;
			else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status %d", hdev->name, status);

	if (test_bit(HCI_MGMT, &hdev->flags) &&
			test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		mgmt_discovering(hdev->id, 0);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);
}

static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {

		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	for (; num_rsp; num_rsp--, info++) {
		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = 0x00;
		data.ssp_mode = 0x00;
		hci_inquiry_cache_update(hdev, &data);
		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
									NULL);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			mgmt_connected(hdev->id, &ev->bdaddr);
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < 3) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
							sizeof(cp), &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
					batostr(&ev->bdaddr), ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
							sizeof(cp), &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = cpu_to_le32(0x00001f40);
			cp.max_latency = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x0f;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}

static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	if (ev->status) {
		mgmt_disconnect_failed(hdev->id);
		return;
	}

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->state = BT_CLOSED;

	if (conn->type == ACL_LINK || conn->type == LE_LINK)
		mgmt_disconnected(hdev->id, &conn->dst);

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		} else {
			mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
		}

		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);

		if (conn->state == BT_CONFIG) {
			if (!ev->status && hdev->ssp_mode > 0 &&
							conn->ssp_mode > 0) {
				struct hci_cp_set_conn_encrypt cp;
				cp.handle = ev->handle;
				cp.encrypt = 0x01;
				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
			} else {
				conn->state = BT_CONNECTED;
				hci_proto_connect_cfm(conn, ev->status);
				hci_conn_put(conn);
			}
		} else {
			hci_auth_cfm(conn, ev->status);

			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			hci_conn_put(conn);
		}

		if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
			if (!ev->status) {
				struct hci_cp_set_conn_encrypt cp;
				cp.handle = ev->handle;
				cp.encrypt = 0x01;
				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
			} else {
				clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
				hci_encrypt_cfm(conn, ev->status, 0x00);
			}
		}
	}

	hci_dev_unlock(hdev);
}

static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
		mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

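/* Command Complete: dispatch to the matching hci_cc_* handler by opcode,
 * then credit the command queue if the controller reports free slots. */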
static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_SSP_MODE:
		hci_cc_read_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}

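/* Command Status: dispatch to the matching hci_cs_* handler by opcode.  As
 * with Command Complete, a non-zero ncmd count releases the next queued
 * command (unless a reset is in progress). */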
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev->id);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}

static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status) {
			if (ev->role)
				conn->link_mode &= ~HCI_LM_MASTER;
			else
				conn->link_mode |= HCI_LM_MASTER;
		}

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

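/* Number of Completed Packets: per-connection flow control.  Returned
 * credits are added back to the ACL, LE or SCO pools (capped at the
 * controller maximum) and the TX tasklet is kicked. */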
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	__le16 *ptr;
	int i;

	skb_pull(skb, sizeof(*ev));

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	if (skb->len < ev->num_hndl * 4) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	tasklet_disable(&hdev->tx_task);

	for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
		struct hci_conn *conn;
		__u16 handle, count;

		handle = get_unaligned_le16(ptr++);
		count = get_unaligned_le16(ptr++);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->sent -= count;

			if (conn->type == ACL_LINK) {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			} else if (conn->type == LE_LINK) {
				if (hdev->le_pkts) {
					hdev->le_cnt += count;
					if (hdev->le_cnt > hdev->le_pkts)
						hdev->le_cnt = hdev->le_pkts;
				} else {
					hdev->acl_cnt += count;
					if (hdev->acl_cnt > hdev->acl_pkts)
						hdev->acl_cnt = hdev->acl_pkts;
				}
			} else {
				hdev->sco_cnt += count;
				if (hdev->sco_cnt > hdev->sco_pkts)
					hdev->sco_cnt = hdev->sco_pkts;
			}
		}
	}

	tasklet_schedule(&hdev->tx_task);

	tasklet_enable(&hdev->tx_task);
}

static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
			if (conn->mode == HCI_CM_ACTIVE)
				conn->power_save = 1;
			else
				conn->power_save = 0;
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn && conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->flags)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %s", hdev->name,
							batostr(&ev->bdaddr));
		goto not_found;
	}

	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
							batostr(&ev->bdaddr));

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
				key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
				conn->auth_type != 0xff &&
				(conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
				conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
								hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, 16);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}

2116 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2117 {
2118 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2119 struct hci_conn *conn;
2120 u8 pin_len = 0;
2121
2122 BT_DBG("%s", hdev->name);
2123
2124 hci_dev_lock(hdev);
2125
2126 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2127 if (conn) {
2128 hci_conn_hold(conn);
2129 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2130 pin_len = conn->pin_length;
2131
2132 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2133 conn->key_type = ev->key_type;
2134
2135 hci_conn_put(conn);
2136 }
2137
2138 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2139 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2140 ev->key_type, pin_len);
2141
2142 hci_dev_unlock(hdev);
2143 }
2144
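/* HCI Read Clock Offset Complete event: cache the reported clock
 * offset in the inquiry cache entry for this peer. */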
2145 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2146 {
2147 struct hci_ev_clock_offset *ev = (void *) skb->data;
2148 struct hci_conn *conn;
2149
2150 BT_DBG("%s status %d", hdev->name, ev->status);
2151
2152 hci_dev_lock(hdev);
2153
2154 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2155 if (conn && !ev->status) {
2156 struct inquiry_entry *ie;
2157
2158 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2159 if (ie) {
2160 ie->data.clock_offset = ev->clock_offset;
2161 ie->timestamp = jiffies;
2162 }
2163 }
2164
2165 hci_dev_unlock(hdev);
2166 }
2167
2168 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2169 {
2170 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2171 struct hci_conn *conn;
2172
2173 BT_DBG("%s status %d", hdev->name, ev->status);
2174
2175 hci_dev_lock(hdev);
2176
2177 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2178 if (conn && !ev->status)
2179 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2180
2181 hci_dev_unlock(hdev);
2182 }
2183
2184 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2185 {
2186 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2187 struct inquiry_entry *ie;
2188
2189 BT_DBG("%s", hdev->name);
2190
2191 hci_dev_lock(hdev);
2192
2193 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2194 if (ie) {
2195 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2196 ie->timestamp = jiffies;
2197 }
2198
2199 hci_dev_unlock(hdev);
2200 }
2201
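/* HCI Inquiry Result with RSSI event: both response layouts (with and
 * without the page scan mode field) are parsed into the inquiry cache
 * and reported through the management interface. */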
2202 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2203 {
2204 struct inquiry_data data;
2205 int num_rsp = *((__u8 *) skb->data);
2206
2207 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2208
2209 if (!num_rsp)
2210 return;
2211
2212 hci_dev_lock(hdev);
2213
2214 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2215
2216 if (test_bit(HCI_MGMT, &hdev->flags))
2217 mgmt_discovering(hdev->id, 1);
2218 }
2219
2220 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2221 struct inquiry_info_with_rssi_and_pscan_mode *info;
2222 info = (void *) (skb->data + 1);
2223
2224 for (; num_rsp; num_rsp--, info++) {
2225 bacpy(&data.bdaddr, &info->bdaddr);
2226 data.pscan_rep_mode = info->pscan_rep_mode;
2227 data.pscan_period_mode = info->pscan_period_mode;
2228 data.pscan_mode = info->pscan_mode;
2229 memcpy(data.dev_class, info->dev_class, 3);
2230 data.clock_offset = info->clock_offset;
2231 data.rssi = info->rssi;
2232 data.ssp_mode = 0x00;
2233 hci_inquiry_cache_update(hdev, &data);
2234 mgmt_device_found(hdev->id, &info->bdaddr,
2235 info->dev_class, info->rssi,
2236 NULL);
2237 }
2238 } else {
2239 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2240
2241 for (; num_rsp; num_rsp--, info++) {
2242 bacpy(&data.bdaddr, &info->bdaddr);
2243 data.pscan_rep_mode = info->pscan_rep_mode;
2244 data.pscan_period_mode = info->pscan_period_mode;
2245 data.pscan_mode = 0x00;
2246 memcpy(data.dev_class, info->dev_class, 3);
2247 data.clock_offset = info->clock_offset;
2248 data.rssi = info->rssi;
2249 data.ssp_mode = 0x00;
2250 hci_inquiry_cache_update(hdev, &data);
2251 mgmt_device_found(hdev->id, &info->bdaddr,
2252 info->dev_class, info->rssi,
2253 NULL);
2254 }
2255 }
2256
2257 hci_dev_unlock(hdev);
2258 }
2259
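/* HCI Read Remote Extended Features Complete event: page 0x01 carries
 * the remote host features, including the SSP host support bit that is
 * cached for the connection and the inquiry cache entry. */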
2260 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2261 {
2262 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2263 struct hci_conn *conn;
2264
2265 BT_DBG("%s", hdev->name);
2266
2267 hci_dev_lock(hdev);
2268
2269 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2270 if (!conn)
2271 goto unlock;
2272
2273 if (!ev->status && ev->page == 0x01) {
2274 struct inquiry_entry *ie;
2275
2276 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2277 if (ie)
2278 ie->data.ssp_mode = (ev->features[0] & 0x01);
2279
2280 conn->ssp_mode = (ev->features[0] & 0x01);
2281 }
2282
2283 if (conn->state != BT_CONFIG)
2284 goto unlock;
2285
2286 if (!ev->status) {
2287 struct hci_cp_remote_name_req cp;
2288 memset(&cp, 0, sizeof(cp));
2289 bacpy(&cp.bdaddr, &conn->dst);
2290 cp.pscan_rep_mode = 0x02;
2291 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2292 }
2293
2294 if (!hci_outgoing_auth_needed(hdev, conn)) {
2295 conn->state = BT_CONNECTED;
2296 hci_proto_connect_cfm(conn, ev->status);
2297 hci_conn_put(conn);
2298 }
2299
2300 unlock:
2301 hci_dev_unlock(hdev);
2302 }
2303
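/* HCI Synchronous Connection Complete event: finalize SCO/eSCO setup,
 * retrying once with an adjusted packet type when the remote rejects
 * the requested parameters. */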
2304 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2305 {
2306 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2307 struct hci_conn *conn;
2308
2309 BT_DBG("%s status %d", hdev->name, ev->status);
2310
2311 hci_dev_lock(hdev);
2312
2313 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2314 if (!conn) {
2315 if (ev->link_type == ESCO_LINK)
2316 goto unlock;
2317
2318 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2319 if (!conn)
2320 goto unlock;
2321
2322 conn->type = SCO_LINK;
2323 }
2324
2325 switch (ev->status) {
2326 case 0x00:
2327 conn->handle = __le16_to_cpu(ev->handle);
2328 conn->state = BT_CONNECTED;
2329
2330 hci_conn_hold_device(conn);
2331 hci_conn_add_sysfs(conn);
2332 break;
2333
2334 case 0x11: /* Unsupported Feature or Parameter Value */
2335 case 0x1c: /* SCO interval rejected */
2336 case 0x1a: /* Unsupported Remote Feature */
2337 case 0x1f: /* Unspecified error */
2338 if (conn->out && conn->attempt < 2) {
2339 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2340 (hdev->esco_type & EDR_ESCO_MASK);
2341 hci_setup_sync(conn, conn->link->handle);
2342 goto unlock;
2343 }
2344 /* fall through */
2345
2346 default:
2347 conn->state = BT_CLOSED;
2348 break;
2349 }
2350
2351 hci_proto_connect_cfm(conn, ev->status);
2352 if (ev->status)
2353 hci_conn_del(conn);
2354
2355 unlock:
2356 hci_dev_unlock(hdev);
2357 }
2358
2359 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2360 {
2361 BT_DBG("%s", hdev->name);
2362 }
2363
2364 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2365 {
2366 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2367
2368 BT_DBG("%s status %d", hdev->name, ev->status);
2369 }
2370
2371 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2372 {
2373 struct inquiry_data data;
2374 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2375 int num_rsp = *((__u8 *) skb->data);
2376
2377 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2378
2379 if (!num_rsp)
2380 return;
2381
2382 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2383
2384 if (test_bit(HCI_MGMT, &hdev->flags))
2385 mgmt_discovering(hdev->id, 1);
2386 }
2387
2388 hci_dev_lock(hdev);
2389
2390 for (; num_rsp; num_rsp--, info++) {
2391 bacpy(&data.bdaddr, &info->bdaddr);
2392 data.pscan_rep_mode = info->pscan_rep_mode;
2393 data.pscan_period_mode = info->pscan_period_mode;
2394 data.pscan_mode = 0x00;
2395 memcpy(data.dev_class, info->dev_class, 3);
2396 data.clock_offset = info->clock_offset;
2397 data.rssi = info->rssi;
2398 data.ssp_mode = 0x01;
2399 hci_inquiry_cache_update(hdev, &data);
2400 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
2401 info->rssi, info->data);
2402 }
2403
2404 hci_dev_unlock(hdev);
2405 }
2406
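/* Pick the authentication requirements for an IO Capability reply.
 * Per the HCI specification bit 0 is the MITM flag: 0x00/0x01 mean no
 * bonding, 0x02/0x03 dedicated bonding and 0x04/0x05 general bonding;
 * IO capability 0x03 is NoInputNoOutput. */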
2407 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2408 {
2409 /* If remote requests dedicated bonding follow that lead */
2410 	/* If the remote requests dedicated bonding, follow that lead */
2411 /* If both remote and local IO capabilities allow MITM
2412 * protection then require it, otherwise don't */
2413 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2414 return 0x02;
2415 else
2416 return 0x03;
2417 }
2418
2419 /* If remote requests no-bonding follow that lead */
2420 	/* If the remote requests no bonding, follow that lead */
2421 return conn->remote_auth | (conn->auth_type & 0x01);
2422
2423 return conn->auth_type;
2424 }
2425
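/* HCI IO Capability Request event: reply with our IO capability and
 * negotiated authentication requirements, or send a negative reply
 * (reason 0x18, Pairing Not Allowed) when pairing is not permitted. */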
2426 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2427 {
2428 struct hci_ev_io_capa_request *ev = (void *) skb->data;
2429 struct hci_conn *conn;
2430
2431 BT_DBG("%s", hdev->name);
2432
2433 hci_dev_lock(hdev);
2434
2435 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2436 if (!conn)
2437 goto unlock;
2438
2439 hci_conn_hold(conn);
2440
2441 if (!test_bit(HCI_MGMT, &hdev->flags))
2442 goto unlock;
2443
2444 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2445 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2446 struct hci_cp_io_capability_reply cp;
2447
2448 bacpy(&cp.bdaddr, &ev->bdaddr);
2449 cp.capability = conn->io_capability;
2450 conn->auth_type = hci_get_auth_req(conn);
2451 cp.authentication = conn->auth_type;
2452
2453 if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2454 hci_find_remote_oob_data(hdev, &conn->dst))
2455 cp.oob_data = 0x01;
2456 else
2457 cp.oob_data = 0x00;
2458
2459 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2460 sizeof(cp), &cp);
2461 } else {
2462 struct hci_cp_io_capability_neg_reply cp;
2463
2464 bacpy(&cp.bdaddr, &ev->bdaddr);
2465 cp.reason = 0x18; /* Pairing not allowed */
2466
2467 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2468 sizeof(cp), &cp);
2469 }
2470
2471 unlock:
2472 hci_dev_unlock(hdev);
2473 }
2474
2475 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2476 {
2477 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2478 struct hci_conn *conn;
2479
2480 BT_DBG("%s", hdev->name);
2481
2482 hci_dev_lock(hdev);
2483
2484 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2485 if (!conn)
2486 goto unlock;
2487
2488 conn->remote_cap = ev->capability;
2489 conn->remote_oob = ev->oob_data;
2490 conn->remote_auth = ev->authentication;
2491
2492 unlock:
2493 hci_dev_unlock(hdev);
2494 }
2495
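/* HCI User Confirmation Request event: reject when we require MITM but
 * the remote cannot provide it, auto-accept as initiator when neither
 * side requires MITM (optionally after a configurable delay), and
 * otherwise forward the request to user space via mgmt. */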
2496 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2497 struct sk_buff *skb)
2498 {
2499 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2500 int loc_mitm, rem_mitm, confirm_hint = 0;
2501 struct hci_conn *conn;
2502
2503 BT_DBG("%s", hdev->name);
2504
2505 hci_dev_lock(hdev);
2506
2507 if (!test_bit(HCI_MGMT, &hdev->flags))
2508 goto unlock;
2509
2510 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2511 if (!conn)
2512 goto unlock;
2513
2514 loc_mitm = (conn->auth_type & 0x01);
2515 rem_mitm = (conn->remote_auth & 0x01);
2516
2517 	/* If we require MITM but the remote device can't provide it
2518 	 * (it has NoInputNoOutput), then reject the confirmation
2519 	 * request. The only exception is when we're the dedicated bonding
2520 	 * initiator (connect_cfm_cb set), since then we always have the
2521 	 * MITM bit set. */
2522 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
2523 BT_DBG("Rejecting request: remote device can't provide MITM");
2524 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
2525 sizeof(ev->bdaddr), &ev->bdaddr);
2526 goto unlock;
2527 }
2528
2529 	/* If neither side requires MITM protection, auto-accept */
2530 if ((!loc_mitm || conn->remote_cap == 0x03) &&
2531 (!rem_mitm || conn->io_capability == 0x03)) {
2532
2533 		/* If we're not the initiator, request authorization to
2534 		 * proceed from user space (mgmt_user_confirm with
2535 		 * confirm_hint set to 1). */
2536 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
2537 BT_DBG("Confirming auto-accept as acceptor");
2538 confirm_hint = 1;
2539 goto confirm;
2540 }
2541
2542 BT_DBG("Auto-accept of user confirmation with %ums delay",
2543 hdev->auto_accept_delay);
2544
2545 if (hdev->auto_accept_delay > 0) {
2546 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
2547 mod_timer(&conn->auto_accept_timer, jiffies + delay);
2548 goto unlock;
2549 }
2550
2551 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
2552 sizeof(ev->bdaddr), &ev->bdaddr);
2553 goto unlock;
2554 }
2555
2556 confirm:
2557 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
2558 confirm_hint);
2559
2560 unlock:
2561 hci_dev_unlock(hdev);
2562 }
2563
2564 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2565 {
2566 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
2567 struct hci_conn *conn;
2568
2569 BT_DBG("%s", hdev->name);
2570
2571 hci_dev_lock(hdev);
2572
2573 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2574 if (!conn)
2575 goto unlock;
2576
2577 	/* To avoid duplicate auth_failed events to user space we check
2578 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we initiated
2579 	 * the authentication. A traditional auth_complete event is always
2580 	 * produced for the initiator and is also mapped to the
2581 	 * mgmt_auth_failed event. */
2582 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2583 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
2584
2585 hci_conn_put(conn);
2586
2587 unlock:
2588 hci_dev_unlock(hdev);
2589 }
2590
2591 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2592 {
2593 struct hci_ev_remote_host_features *ev = (void *) skb->data;
2594 struct inquiry_entry *ie;
2595
2596 BT_DBG("%s", hdev->name);
2597
2598 hci_dev_lock(hdev);
2599
2600 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2601 if (ie)
2602 ie->data.ssp_mode = (ev->features[0] & 0x01);
2603
2604 hci_dev_unlock(hdev);
2605 }
2606
2607 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2608 struct sk_buff *skb)
2609 {
2610 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2611 struct oob_data *data;
2612
2613 BT_DBG("%s", hdev->name);
2614
2615 hci_dev_lock(hdev);
2616
2617 if (!test_bit(HCI_MGMT, &hdev->flags))
2618 goto unlock;
2619
2620 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2621 if (data) {
2622 struct hci_cp_remote_oob_data_reply cp;
2623
2624 bacpy(&cp.bdaddr, &ev->bdaddr);
2625 memcpy(cp.hash, data->hash, sizeof(cp.hash));
2626 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2627
2628 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2629 &cp);
2630 } else {
2631 struct hci_cp_remote_oob_data_neg_reply cp;
2632
2633 bacpy(&cp.bdaddr, &ev->bdaddr);
2634 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2635 &cp);
2636 }
2637
2638 unlock:
2639 hci_dev_unlock(hdev);
2640 }
2641
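/* HCI LE Connection Complete event (LE Meta subevent): look up or
 * create the LE connection object and report success or failure to
 * the management interface. */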
2642 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2643 {
2644 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2645 struct hci_conn *conn;
2646
2647 BT_DBG("%s status %d", hdev->name, ev->status);
2648
2649 hci_dev_lock(hdev);
2650
2651 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2652 if (!conn) {
2653 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
2654 if (!conn) {
2655 BT_ERR("No memory for new connection");
2656 hci_dev_unlock(hdev);
2657 return;
2658 }
2659 }
2660
2661 if (ev->status) {
2662 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
2663 hci_proto_connect_cfm(conn, ev->status);
2664 conn->state = BT_CLOSED;
2665 hci_conn_del(conn);
2666 goto unlock;
2667 }
2668
2669 mgmt_connected(hdev->id, &ev->bdaddr);
2670
2671 conn->handle = __le16_to_cpu(ev->handle);
2672 conn->state = BT_CONNECTED;
2673
2674 hci_conn_hold_device(conn);
2675 hci_conn_add_sysfs(conn);
2676
2677 hci_proto_connect_cfm(conn, ev->status);
2678
2679 unlock:
2680 hci_dev_unlock(hdev);
2681 }
2682
2683 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2684 {
2685 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2686
2687 skb_pull(skb, sizeof(*le_ev));
2688
2689 switch (le_ev->subevent) {
2690 case HCI_EV_LE_CONN_COMPLETE:
2691 hci_le_conn_complete_evt(hdev, skb);
2692 break;
2693
2694 default:
2695 break;
2696 }
2697 }
2698
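/* Main entry point for incoming HCI event packets: strip the event
 * header and dispatch to the matching handler above. */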
2699 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2700 {
2701 struct hci_event_hdr *hdr = (void *) skb->data;
2702 __u8 event = hdr->evt;
2703
2704 skb_pull(skb, HCI_EVENT_HDR_SIZE);
2705
2706 switch (event) {
2707 case HCI_EV_INQUIRY_COMPLETE:
2708 hci_inquiry_complete_evt(hdev, skb);
2709 break;
2710
2711 case HCI_EV_INQUIRY_RESULT:
2712 hci_inquiry_result_evt(hdev, skb);
2713 break;
2714
2715 case HCI_EV_CONN_COMPLETE:
2716 hci_conn_complete_evt(hdev, skb);
2717 break;
2718
2719 case HCI_EV_CONN_REQUEST:
2720 hci_conn_request_evt(hdev, skb);
2721 break;
2722
2723 case HCI_EV_DISCONN_COMPLETE:
2724 hci_disconn_complete_evt(hdev, skb);
2725 break;
2726
2727 case HCI_EV_AUTH_COMPLETE:
2728 hci_auth_complete_evt(hdev, skb);
2729 break;
2730
2731 case HCI_EV_REMOTE_NAME:
2732 hci_remote_name_evt(hdev, skb);
2733 break;
2734
2735 case HCI_EV_ENCRYPT_CHANGE:
2736 hci_encrypt_change_evt(hdev, skb);
2737 break;
2738
2739 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
2740 hci_change_link_key_complete_evt(hdev, skb);
2741 break;
2742
2743 case HCI_EV_REMOTE_FEATURES:
2744 hci_remote_features_evt(hdev, skb);
2745 break;
2746
2747 case HCI_EV_REMOTE_VERSION:
2748 hci_remote_version_evt(hdev, skb);
2749 break;
2750
2751 case HCI_EV_QOS_SETUP_COMPLETE:
2752 hci_qos_setup_complete_evt(hdev, skb);
2753 break;
2754
2755 case HCI_EV_CMD_COMPLETE:
2756 hci_cmd_complete_evt(hdev, skb);
2757 break;
2758
2759 case HCI_EV_CMD_STATUS:
2760 hci_cmd_status_evt(hdev, skb);
2761 break;
2762
2763 case HCI_EV_ROLE_CHANGE:
2764 hci_role_change_evt(hdev, skb);
2765 break;
2766
2767 case HCI_EV_NUM_COMP_PKTS:
2768 hci_num_comp_pkts_evt(hdev, skb);
2769 break;
2770
2771 case HCI_EV_MODE_CHANGE:
2772 hci_mode_change_evt(hdev, skb);
2773 break;
2774
2775 case HCI_EV_PIN_CODE_REQ:
2776 hci_pin_code_request_evt(hdev, skb);
2777 break;
2778
2779 case HCI_EV_LINK_KEY_REQ:
2780 hci_link_key_request_evt(hdev, skb);
2781 break;
2782
2783 case HCI_EV_LINK_KEY_NOTIFY:
2784 hci_link_key_notify_evt(hdev, skb);
2785 break;
2786
2787 case HCI_EV_CLOCK_OFFSET:
2788 hci_clock_offset_evt(hdev, skb);
2789 break;
2790
2791 case HCI_EV_PKT_TYPE_CHANGE:
2792 hci_pkt_type_change_evt(hdev, skb);
2793 break;
2794
2795 case HCI_EV_PSCAN_REP_MODE:
2796 hci_pscan_rep_mode_evt(hdev, skb);
2797 break;
2798
2799 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
2800 hci_inquiry_result_with_rssi_evt(hdev, skb);
2801 break;
2802
2803 case HCI_EV_REMOTE_EXT_FEATURES:
2804 hci_remote_ext_features_evt(hdev, skb);
2805 break;
2806
2807 case HCI_EV_SYNC_CONN_COMPLETE:
2808 hci_sync_conn_complete_evt(hdev, skb);
2809 break;
2810
2811 case HCI_EV_SYNC_CONN_CHANGED:
2812 hci_sync_conn_changed_evt(hdev, skb);
2813 break;
2814
2815 case HCI_EV_SNIFF_SUBRATE:
2816 hci_sniff_subrate_evt(hdev, skb);
2817 break;
2818
2819 case HCI_EV_EXTENDED_INQUIRY_RESULT:
2820 hci_extended_inquiry_result_evt(hdev, skb);
2821 break;
2822
2823 case HCI_EV_IO_CAPA_REQUEST:
2824 hci_io_capa_request_evt(hdev, skb);
2825 break;
2826
2827 case HCI_EV_IO_CAPA_REPLY:
2828 hci_io_capa_reply_evt(hdev, skb);
2829 break;
2830
2831 case HCI_EV_USER_CONFIRM_REQUEST:
2832 hci_user_confirm_request_evt(hdev, skb);
2833 break;
2834
2835 case HCI_EV_SIMPLE_PAIR_COMPLETE:
2836 hci_simple_pair_complete_evt(hdev, skb);
2837 break;
2838
2839 case HCI_EV_REMOTE_HOST_FEATURES:
2840 hci_remote_host_features_evt(hdev, skb);
2841 break;
2842
2843 case HCI_EV_LE_META:
2844 hci_le_meta_evt(hdev, skb);
2845 break;
2846
2847 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
2848 hci_remote_oob_data_request_evt(hdev, skb);
2849 break;
2850
2851 default:
2852 BT_DBG("%s event 0x%x", hdev->name, event);
2853 break;
2854 }
2855
2856 kfree_skb(skb);
2857 hdev->stat.evt_rx++;
2858 }
2859
2860 /* Generate internal stack event */
2861 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
2862 {
2863 struct hci_event_hdr *hdr;
2864 struct hci_ev_stack_internal *ev;
2865 struct sk_buff *skb;
2866
2867 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
2868 if (!skb)
2869 return;
2870
2871 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
2872 hdr->evt = HCI_EV_STACK_INTERNAL;
2873 hdr->plen = sizeof(*ev) + dlen;
2874
2875 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
2876 ev->type = type;
2877 memcpy(ev->data, data, dlen);
2878
2879 bt_cb(skb)->incoming = 1;
2880 __net_timestamp(skb);
2881
2882 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
2883 skb->dev = (void *) hdev;
2884 hci_send_to_sock(hdev, skb, NULL);
2885 kfree_skb(skb);
2886 }