/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

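/* Initiate an outgoing LE connection: mark the hci_conn as a pending
 * master-role connection and send HCI_OP_LE_CREATE_CONN with the default
 * scan and connection interval parameters used below.
 */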
static void hci_le_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->link_mode |= HCI_LM_MASTER;
	conn->sec_level = BT_SECURITY_LOW;

	memset(&cp, 0, sizeof(cp));
	cp.scan_interval = cpu_to_le16(0x0060);
	cp.scan_window = cpu_to_le16(0x0030);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.conn_interval_min = cpu_to_le16(0x0028);
	cp.conn_interval_max = cpu_to_le16(0x0038);
	cp.supervision_timeout = cpu_to_le16(0x002a);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_connect_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

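/* Initiate an outgoing ACL (BR/EDR) connection. If a sufficiently recent
 * inquiry cache entry exists for the peer, reuse its page scan parameters
 * and clock offset to speed up paging; otherwise fall back to the default
 * R2 page scan repetition mode.
 */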
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

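/* Request disconnection of an established link. The connection is moved
 * to BT_DISCONN here; teardown completes when the controller reports the
 * Disconnection Complete event.
 */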
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

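/* Set up a legacy SCO link on top of an existing ACL connection,
 * identified by its ACL handle. Used when the controller is not eSCO
 * capable (see hci_sco_setup() below).
 */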
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

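/* Set up a synchronous (eSCO) connection on top of an existing ACL link.
 * Bandwidth, latency and retransmission effort are fixed here; the voice
 * setting comes from the controller configuration.
 */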
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.max_latency = cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

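/* Ask the controller to update the parameters of an established LE
 * connection (interval range, slave latency and supervision timeout).
 */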
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
			u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(min);
	cp.conn_interval_max = cpu_to_le16(max);
	cp.conn_latency = cpu_to_le16(latency);
	cp.supervision_timeout = cpu_to_le16(to_multiplier);
	cp.min_ce_len = cpu_to_le16(0x0001);
	cp.max_ce_len = cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_conn_update);

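/* Start (or restart) encryption on an LE link using the given long term
 * key, EDIV and random value.
 */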
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("%p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
	cp.ediv = ediv;
	memcpy(cp.rand, rand, sizeof(cp.rand));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_start_enc);

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

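/* Delayed work that fires once the last reference to a connection has
 * been dropped: cancel a connection attempt that is still in progress,
 * or issue a disconnect for an established link.
 */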
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	__u8 reason;

	BT_DBG("conn %p state %s", conn, state_to_string(conn->state));

	if (atomic_read(&conn->refcnt))
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_connect_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_connect_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

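/* Timer callbacks wired up in hci_conn_add(): the idle timer puts an
 * inactive ACL link into sniff mode, the auto accept timer answers a
 * pending user confirmation request on the user's behalf.
 */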
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

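/* Allocate and initialise a new hci_conn for the given link type and
 * destination address, pick the packet types supported for that link
 * type, register the connection in the device's connection hash and
 * create its sysfs entry. Returns NULL on allocation failure.
 */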
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long) conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
		    (unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	return conn;
}

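/* Tear down a connection: stop its timers and delayed work, detach any
 * linked SCO/ACL peer, return unacknowledged packet credits to the
 * device, remove it from the connection hash and free it.
 */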
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	cancel_delayed_work_sync(&conn->disc_work);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	if (conn->handle == 0)
		kfree(conn);

	return 0;
}

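/* Pick the local adapter to use for a connection to dst. If a source
 * address is given, the adapter with that address is chosen; otherwise
 * the first usable adapter whose address differs from dst is used.
 * The returned hci_dev is held and must be released with hci_dev_put().
 */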
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_conn *le;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (type == LE_LINK) {
		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
		if (!le) {
			le = hci_conn_add(hdev, LE_LINK, dst);
			if (!le)
				return ERR_PTR(-ENOMEM);

			le->dst_type = bdaddr_to_le(dst_type);
			hci_le_connect(le);
		}

		le->pending_sec_level = sec_level;
		le->auth_type = auth_type;

		hci_conn_hold(le);

		return le;
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* encrypt must be pending if auth is also pending */
		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);
		if (conn->key_type != 0xff)
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security levels 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM ||
	     sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key always has sufficient security for security
	   levels 1 and 2. High security level requires the combination key
	   to be generated using the maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level != BT_SECURITY_HIGH ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("conn %p", conn);

	if (sec_level != BT_SECURITY_HIGH)
		return 1; /* Accept if non-secure is required */

	if (conn->sec_level == BT_SECURITY_HIGH)
		return 1;

	return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

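/* Reference counting for the connection's device (sysfs) object: the
 * sysfs entry is removed once the last device reference is dropped.
 */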
void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

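/* HCIGETCONNLIST ioctl helper: copy a snapshot of the device's current
 * connections (up to the number requested by userspace) into a
 * struct hci_conn_list_req followed by hci_conn_info entries.
 */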
int hci_get_conn_list(void __user *arg)
{
	register struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

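/* HCIGETCONNINFO and HCIGETAUTHINFO ioctl helpers: look up a single
 * connection by address and copy its state, or its authentication type,
 * back to userspace.
 */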
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

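/* Per-connection logical channels used for per-channel data queues
 * (e.g. by the L2CAP layer): channels are added to and removed from
 * conn->chan_list under RCU protection.
 */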
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s conn %p", hdev->name, conn);

	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = conn;
	skb_queue_head_init(&chan->data_q);

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

int hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	skb_queue_purge(&chan->data_q);
	kfree(chan);

	return 0;
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}