/* net/bluetooth/hci_conn.c — HCI connection handling
 * (imported from branch merge: 'next' into for-linus)
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <net/sock.h>
39
40 #include <linux/uaccess.h>
41 #include <asm/unaligned.h>
42
43 #include <net/bluetooth/bluetooth.h>
44 #include <net/bluetooth/hci_core.h>
45
46 static void hci_le_connect(struct hci_conn *conn)
47 {
48 struct hci_dev *hdev = conn->hdev;
49 struct hci_cp_le_create_conn cp;
50
51 conn->state = BT_CONNECT;
52 conn->out = true;
53 conn->link_mode |= HCI_LM_MASTER;
54 conn->sec_level = BT_SECURITY_LOW;
55
56 memset(&cp, 0, sizeof(cp));
57 cp.scan_interval = cpu_to_le16(0x0060);
58 cp.scan_window = cpu_to_le16(0x0030);
59 bacpy(&cp.peer_addr, &conn->dst);
60 cp.peer_addr_type = conn->dst_type;
61 cp.conn_interval_min = cpu_to_le16(0x0028);
62 cp.conn_interval_max = cpu_to_le16(0x0038);
63 cp.supervision_timeout = cpu_to_le16(0x002a);
64 cp.min_ce_len = cpu_to_le16(0x0000);
65 cp.max_ce_len = cpu_to_le16(0x0000);
66
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
68 }
69
70 static void hci_le_connect_cancel(struct hci_conn *conn)
71 {
72 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
73 }
74
/* Initiate an outgoing BR/EDR ACL connection to conn->dst.
 *
 * Builds an HCI Create Connection command for the controller.  Page
 * scan parameters and the clock offset are seeded from the inquiry
 * cache when a sufficiently fresh entry for the peer exists, which
 * speeds up paging.  Each call bumps conn->attempt so callers can
 * track retries.
 */
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	/* We initiate, so we start out in the master role */
	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		/* Only trust cached paging parameters while the cache
		 * entry is fresh enough.
		 */
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			/* Setting bit 15 marks the clock offset as valid */
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	/* Allow a role switch if the controller can do it and local
	 * link policy does not insist on staying master.
	 */
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
118
119 static void hci_acl_connect_cancel(struct hci_conn *conn)
120 {
121 struct hci_cp_create_conn_cancel cp;
122
123 BT_DBG("%p", conn);
124
125 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
126 return;
127
128 bacpy(&cp.bdaddr, &conn->dst);
129 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
130 }
131
132 void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
133 {
134 struct hci_cp_disconnect cp;
135
136 BT_DBG("%p", conn);
137
138 conn->state = BT_DISCONN;
139
140 cp.handle = cpu_to_le16(conn->handle);
141 cp.reason = reason;
142 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
143 }
144
145 void hci_add_sco(struct hci_conn *conn, __u16 handle)
146 {
147 struct hci_dev *hdev = conn->hdev;
148 struct hci_cp_add_sco cp;
149
150 BT_DBG("%p", conn);
151
152 conn->state = BT_CONNECT;
153 conn->out = true;
154
155 conn->attempt++;
156
157 cp.handle = cpu_to_le16(handle);
158 cp.pkt_type = cpu_to_le16(conn->pkt_type);
159
160 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
161 }
162
163 void hci_setup_sync(struct hci_conn *conn, __u16 handle)
164 {
165 struct hci_dev *hdev = conn->hdev;
166 struct hci_cp_setup_sync_conn cp;
167
168 BT_DBG("%p", conn);
169
170 conn->state = BT_CONNECT;
171 conn->out = true;
172
173 conn->attempt++;
174
175 cp.handle = cpu_to_le16(handle);
176 cp.pkt_type = cpu_to_le16(conn->pkt_type);
177
178 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
179 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
180 cp.max_latency = cpu_to_le16(0xffff);
181 cp.voice_setting = cpu_to_le16(hdev->voice_setting);
182 cp.retrans_effort = 0xff;
183
184 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
185 }
186
187 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
188 u16 latency, u16 to_multiplier)
189 {
190 struct hci_cp_le_conn_update cp;
191 struct hci_dev *hdev = conn->hdev;
192
193 memset(&cp, 0, sizeof(cp));
194
195 cp.handle = cpu_to_le16(conn->handle);
196 cp.conn_interval_min = cpu_to_le16(min);
197 cp.conn_interval_max = cpu_to_le16(max);
198 cp.conn_latency = cpu_to_le16(latency);
199 cp.supervision_timeout = cpu_to_le16(to_multiplier);
200 cp.min_ce_len = cpu_to_le16(0x0001);
201 cp.max_ce_len = cpu_to_le16(0x0001);
202
203 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
204 }
205 EXPORT_SYMBOL(hci_le_conn_update);
206
/* Begin LE link-layer encryption on @conn.
 *
 * @ediv: encrypted diversifier, already in little-endian order
 * @rand: 8-byte random value from the pairing exchange
 * @ltk:  16-byte long term key
 *
 * Only queues the HCI LE Start Encryption command; the result is
 * reported asynchronously via the encryption change event.
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("%p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	/* sizeof the destination fields: the array parameters decay to
	 * pointers, so sizeof on them would be wrong.
	 */
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
	cp.ediv = ediv;
	memcpy(cp.rand, rand, sizeof(cp.rand));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_start_enc);
225
226 void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
227 {
228 struct hci_dev *hdev = conn->hdev;
229 struct hci_cp_le_ltk_reply cp;
230
231 BT_DBG("%p", conn);
232
233 memset(&cp, 0, sizeof(cp));
234
235 cp.handle = cpu_to_le16(conn->handle);
236 memcpy(cp.ltk, ltk, sizeof(ltk));
237
238 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
239 }
240 EXPORT_SYMBOL(hci_le_ltk_reply);
241
242 void hci_le_ltk_neg_reply(struct hci_conn *conn)
243 {
244 struct hci_dev *hdev = conn->hdev;
245 struct hci_cp_le_ltk_neg_reply cp;
246
247 BT_DBG("%p", conn);
248
249 memset(&cp, 0, sizeof(cp));
250
251 cp.handle = cpu_to_le16(conn->handle);
252
253 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
254 }
255
/* Device _must_ be locked */
/* Continue SCO/eSCO setup once the underlying ACL stage finished.
 *
 * @status: HCI status of the ACL stage; 0 means success.  On
 * success the synchronous link is started (eSCO when the controller
 * supports it, legacy SCO otherwise); on failure the parked SCO
 * connection is torn down and its protocol layer notified.
 */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	/* Nothing to do when no SCO link is parked on this ACL */
	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
276
/* Delayed-work handler that expires an unused connection.
 *
 * Runs when the disconnect timer fires.  If the connection picked
 * up a reference in the meantime it is left alone.  Otherwise the
 * action depends on state: pending outgoing connects are cancelled,
 * established links are disconnected with the reason chosen by the
 * protocol layer, and anything else is simply marked closed.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	__u8 reason;

	BT_DBG("conn %p state %s", conn, state_to_string(conn->state));

	/* Still in use - do not tear anything down */
	if (atomic_read(&conn->refcnt))
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		/* Only outgoing attempts can be cancelled */
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_connect_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_connect_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		/* Let the protocol layer pick the disconnect reason */
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
308
/* Enter sniff mode */
/* Try to move an idle ACL link into sniff mode to save power.
 *
 * Bails out when the device is in raw mode, when either side lacks
 * sniff support, when the link is not in active mode, or when link
 * policy forbids sniff.  If both sides support sniff subrating, the
 * subrate parameters are configured first.  The mode change itself
 * is only requested when no other mode change is already pending.
 */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
344
345 static void hci_conn_idle(unsigned long arg)
346 {
347 struct hci_conn *conn = (void *) arg;
348
349 BT_DBG("conn %p mode %d", conn, conn->mode);
350
351 hci_conn_enter_sniff_mode(conn);
352 }
353
/* Timer callback that auto-accepts a pending user confirmation
 * request by sending User Confirmation Reply for the peer address.
 */
static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
362
/* Allocate and initialise a new connection object on @hdev.
 *
 * @type: link type (ACL_LINK, SCO_LINK, ESCO_LINK or LE_LINK)
 * @dst:  remote device address
 *
 * The usable packet-type mask is derived from the device's
 * capabilities for the given link type.  The connection is added to
 * the device's connection hash, a reference on @hdev is taken and
 * sysfs entries are created.  Returns the new connection, or NULL
 * on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	/* 0xff marks remote auth requirements and key type as unknown */
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Pick the packet types usable for this link type */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	/* Timers and work used for disconnect, idle and auto-accept */
	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
		    (unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	return conn;
}
425
/* Tear down a connection created by hci_conn_add().
 *
 * Stops the timers and pending work, detaches any linked SCO/ACL
 * peer, returns unacknowledged packet credits to the device's
 * counters, flushes the channel list and data queue, removes the
 * connection from the hash and drops the references taken at
 * creation.  The object itself is freed here only when it never got
 * a handle (handle == 0) -- NOTE(review): confirm where connections
 * with a real handle are freed in this tree.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	cancel_delayed_work_sync(&conn->disc_work);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		/* LE shares the ACL buffer pool when the controller has
		 * no dedicated LE buffers.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO: unlink from the parent ACL connection */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}


	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	if (conn->handle == 0)
		kfree(conn);

	return 0;
}
476
477 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
478 {
479 int use_src = bacmp(src, BDADDR_ANY);
480 struct hci_dev *hdev = NULL, *d;
481
482 BT_DBG("%s -> %s", batostr(src), batostr(dst));
483
484 read_lock(&hci_dev_list_lock);
485
486 list_for_each_entry(d, &hci_dev_list, list) {
487 if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
488 continue;
489
490 /* Simple routing:
491 * No source address - find interface with bdaddr != dst
492 * Source address - find interface with bdaddr == src
493 */
494
495 if (use_src) {
496 if (!bacmp(&d->bdaddr, src)) {
497 hdev = d; break;
498 }
499 } else {
500 if (bacmp(&d->bdaddr, dst)) {
501 hdev = d; break;
502 }
503 }
504 }
505
506 if (hdev)
507 hdev = hci_dev_hold(hdev);
508
509 read_unlock(&hci_dev_list_lock);
510 return hdev;
511 }
512 EXPORT_SYMBOL(hci_get_route);
513
/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
/* For LE the peer must already be in the advertising cache; a fresh
 * connection attempt is started and returned with a reference held.
 * For ACL an existing connection is reused or a new one created and
 * connected.  For SCO/eSCO the ACL link is set up (or reused) first,
 * the synchronous link is attached to it, and SCO setup runs once
 * the ACL is connected -- possibly deferred until a pending mode
 * change completes.  Returns the connection with a reference held,
 * or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_conn *le;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (type == LE_LINK) {
		struct adv_entry *entry;

		/* Only one LE connection attempt per peer at a time */
		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
		if (le)
			return ERR_PTR(-EBUSY);

		/* The address type comes from the advertising cache */
		entry = hci_find_adv_entry(hdev, dst);
		if (!entry)
			return ERR_PTR(-EHOSTUNREACH);

		le = hci_conn_add(hdev, LE_LINK, dst);
		if (!le)
			return ERR_PTR(-ENOMEM);

		le->dst_type = entry->bdaddr_type;

		hci_le_connect(le);

		hci_conn_hold(le);

		return le;
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	/* Only (re)connect when the ACL is not already in progress */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link the SCO connection with its parent ACL */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);
598
599 /* Check link security requirement */
600 int hci_conn_check_link_mode(struct hci_conn *conn)
601 {
602 BT_DBG("conn %p", conn);
603
604 if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
605 return 0;
606
607 return 1;
608 }
609 EXPORT_SYMBOL(hci_conn_check_link_mode);
610
/* Authenticate remote device */
/* Request authentication of the link at @sec_level.
 *
 * Raises the pending security level when needed.  Returns 1 when
 * the link is already authenticated at a sufficient level;
 * otherwise queues an HCI Authentication Request (unless one is
 * already pending) and returns 0.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* encrypt must be pending if auth is also pending */
		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);
		/* Track re-authentication of an existing key so the
		 * completion event can be told apart.
		 */
		if (conn->key_type != 0xff)
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
	}

	return 0;
}
644
645 /* Encrypt the the link */
646 static void hci_conn_encrypt(struct hci_conn *conn)
647 {
648 BT_DBG("conn %p", conn);
649
650 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
651 struct hci_cp_set_conn_encrypt cp;
652 cp.handle = cpu_to_le16(conn->handle);
653 cp.encrypt = 0x01;
654 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
655 &cp);
656 }
657 }
658
/* Enable security */
/* Ensure the link meets @sec_level, authenticating and/or
 * encrypting as necessary.
 *
 * Returns 1 when the requirement is already satisfied, 0 when an
 * authentication or encryption procedure has been started and the
 * caller must wait for the corresponding completion event.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM ||
	     sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level != BT_SECURITY_HIGH ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* Do not start a second procedure while one is in flight */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
713
714 /* Check secure link requirement */
715 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
716 {
717 BT_DBG("conn %p", conn);
718
719 if (sec_level != BT_SECURITY_HIGH)
720 return 1; /* Accept if non-secure is required */
721
722 if (conn->sec_level == BT_SECURITY_HIGH)
723 return 1;
724
725 return 0; /* Reject not secure link */
726 }
727 EXPORT_SYMBOL(hci_conn_check_secure);
728
729 /* Change link key */
730 int hci_conn_change_link_key(struct hci_conn *conn)
731 {
732 BT_DBG("conn %p", conn);
733
734 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
735 struct hci_cp_change_conn_link_key cp;
736 cp.handle = cpu_to_le16(conn->handle);
737 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
738 sizeof(cp), &cp);
739 }
740
741 return 0;
742 }
743 EXPORT_SYMBOL(hci_conn_change_link_key);
744
745 /* Switch role */
746 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
747 {
748 BT_DBG("conn %p", conn);
749
750 if (!role && conn->link_mode & HCI_LM_MASTER)
751 return 1;
752
753 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
754 struct hci_cp_switch_role cp;
755 bacpy(&cp.bdaddr, &conn->dst);
756 cp.role = role;
757 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
758 }
759
760 return 0;
761 }
762 EXPORT_SYMBOL(hci_conn_switch_role);
763
/* Enter active mode */
/* Bring a sniffing link back to active mode (unless power save is
 * allowed and @force_active is not set), then (re)arm the idle
 * timer that will eventually drop it back into sniff mode.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	/* Only exit sniff when no other mode change is pending */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
}
791
792 /* Drop all connection on the device */
793 void hci_conn_hash_flush(struct hci_dev *hdev)
794 {
795 struct hci_conn_hash *h = &hdev->conn_hash;
796 struct hci_conn *c, *n;
797
798 BT_DBG("hdev %s", hdev->name);
799
800 list_for_each_entry_safe(c, n, &h->list, list) {
801 c->state = BT_CLOSED;
802
803 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
804 hci_conn_del(c);
805 }
806 }
807
808 /* Check pending connect attempts */
809 void hci_conn_check_pending(struct hci_dev *hdev)
810 {
811 struct hci_conn *conn;
812
813 BT_DBG("hdev %s", hdev->name);
814
815 hci_dev_lock(hdev);
816
817 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
818 if (conn)
819 hci_acl_connect(conn);
820
821 hci_dev_unlock(hdev);
822 }
823
/* Take a device-level reference on @conn; pairs with
 * hci_conn_put_device(), which removes the sysfs entry on the
 * final put.
 */
void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);
829
/* Drop a device-level reference on @conn; removes the connection's
 * sysfs entry when the last reference goes away.
 */
void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);
836
837 int hci_get_conn_list(void __user *arg)
838 {
839 register struct hci_conn *c;
840 struct hci_conn_list_req req, *cl;
841 struct hci_conn_info *ci;
842 struct hci_dev *hdev;
843 int n = 0, size, err;
844
845 if (copy_from_user(&req, arg, sizeof(req)))
846 return -EFAULT;
847
848 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
849 return -EINVAL;
850
851 size = sizeof(req) + req.conn_num * sizeof(*ci);
852
853 cl = kmalloc(size, GFP_KERNEL);
854 if (!cl)
855 return -ENOMEM;
856
857 hdev = hci_dev_get(req.dev_id);
858 if (!hdev) {
859 kfree(cl);
860 return -ENODEV;
861 }
862
863 ci = cl->conn_info;
864
865 hci_dev_lock(hdev);
866 list_for_each_entry(c, &hdev->conn_hash.list, list) {
867 bacpy(&(ci + n)->bdaddr, &c->dst);
868 (ci + n)->handle = c->handle;
869 (ci + n)->type = c->type;
870 (ci + n)->out = c->out;
871 (ci + n)->state = c->state;
872 (ci + n)->link_mode = c->link_mode;
873 if (++n >= req.conn_num)
874 break;
875 }
876 hci_dev_unlock(hdev);
877
878 cl->dev_id = hdev->id;
879 cl->conn_num = n;
880 size = sizeof(req) + n * sizeof(*ci);
881
882 hci_dev_put(hdev);
883
884 err = copy_to_user(arg, cl, size);
885 kfree(cl);
886
887 return err ? -EFAULT : 0;
888 }
889
890 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
891 {
892 struct hci_conn_info_req req;
893 struct hci_conn_info ci;
894 struct hci_conn *conn;
895 char __user *ptr = arg + sizeof(req);
896
897 if (copy_from_user(&req, arg, sizeof(req)))
898 return -EFAULT;
899
900 hci_dev_lock(hdev);
901 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
902 if (conn) {
903 bacpy(&ci.bdaddr, &conn->dst);
904 ci.handle = conn->handle;
905 ci.type = conn->type;
906 ci.out = conn->out;
907 ci.state = conn->state;
908 ci.link_mode = conn->link_mode;
909 }
910 hci_dev_unlock(hdev);
911
912 if (!conn)
913 return -ENOENT;
914
915 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
916 }
917
918 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
919 {
920 struct hci_auth_info_req req;
921 struct hci_conn *conn;
922
923 if (copy_from_user(&req, arg, sizeof(req)))
924 return -EFAULT;
925
926 hci_dev_lock(hdev);
927 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
928 if (conn)
929 req.type = conn->auth_type;
930 hci_dev_unlock(hdev);
931
932 if (!conn)
933 return -ENOENT;
934
935 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
936 }
937
938 struct hci_chan *hci_chan_create(struct hci_conn *conn)
939 {
940 struct hci_dev *hdev = conn->hdev;
941 struct hci_chan *chan;
942
943 BT_DBG("%s conn %p", hdev->name, conn);
944
945 chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
946 if (!chan)
947 return NULL;
948
949 chan->conn = conn;
950 skb_queue_head_init(&chan->data_q);
951
952 list_add_rcu(&chan->list, &conn->chan_list);
953
954 return chan;
955 }
956
/* Remove @chan from its connection's channel list and free it.
 *
 * Waits for an RCU grace period after unlinking so that lockless
 * readers traversing the channel list can no longer see the entry
 * by the time its queue is purged and the memory is freed.
 */
int hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	skb_queue_purge(&chan->data_q);
	kfree(chan);

	return 0;
}
973
974 void hci_chan_list_flush(struct hci_conn *conn)
975 {
976 struct hci_chan *chan, *n;
977
978 BT_DBG("conn %p", conn);
979
980 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
981 hci_chan_del(chan);
982 }