/* net/bluetooth/hci_conn.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/smp.h>

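/* Kick off an outgoing LE connection as master. The parameters below are
 * fixed: scan_interval 0x0060 and scan_window 0x0030 are in units of
 * 0.625 ms (60 ms / 30 ms), conn_interval_min/max 0x0028/0x0038 are in
 * units of 1.25 ms (50 ms / 70 ms), and supervision_timeout 0x002a is in
 * units of 10 ms (420 ms), per the HCI LE Create Connection definition.
 */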
static void hci_le_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->link_mode |= HCI_LM_MASTER;
	conn->sec_level = BT_SECURITY_LOW;

	memset(&cp, 0, sizeof(cp));
	cp.scan_interval = __constant_cpu_to_le16(0x0060);
	cp.scan_window = __constant_cpu_to_le16(0x0030);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
	cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
	cp.max_ce_len = __constant_cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

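/* Create a BR/EDR ACL connection as master. Page scan parameters are taken
 * from the inquiry cache when a fresh entry exists; the 0x8000 bit OR-ed
 * into clock_offset marks the offset as valid. A role switch is allowed
 * when the controller supports it and we do not insist on staying master.
 */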
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
					  __constant_cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

static void hci_reject_sco(struct hci_conn *conn)
{
	struct hci_cp_reject_sync_conn_req cp;

	cp.reason = HCI_ERROR_REMOTE_USER_TERM;
	bacpy(&cp.bdaddr, &conn->dst);

	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}

void hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

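/* Set up an eSCO link on top of an existing ACL. tx/rx_bandwidth 0x1f40 is
 * 8000 bytes/s (64 kbit/s voice), max_latency 0xffff and retrans_effort 0xff
 * mean "don't care", and the packet type mask was computed in hci_conn_add().
 */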
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
	cp.max_latency = __constant_cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

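/* Ask the controller to update the LE connection parameters. min/max are the
 * connection interval in units of 1.25 ms, latency is in connection events
 * and to_multiplier is the supervision timeout in units of 10 ms; callers
 * (such as the L2CAP connection parameter update path) pass the raw HCI
 * values through unchanged.
 */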
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
			u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(min);
	cp.conn_interval_max = cpu_to_le16(max);
	cp.conn_latency = cpu_to_le16(latency);
	cp.supervision_timeout = cpu_to_le16(to_multiplier);
	cp.min_ce_len = __constant_cpu_to_le16(0x0001);
	cp.max_ce_len = __constant_cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}

void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
	cp.ediv = ediv;
	memcpy(cp.rand, rand, sizeof(cp.rand));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

static void hci_conn_disconnect(struct hci_conn *conn)
{
	__u8 reason = hci_proto_disconn_ind(conn);

	switch (conn->type) {
	case AMP_LINK:
		hci_amp_disconn(conn, reason);
		break;
	default:
		hci_disconnect(conn, reason);
		break;
	}
}

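/* Deferred work that runs after the last reference to a connection has been
 * dropped: outgoing connects are cancelled, incoming SCO/eSCO requests are
 * rejected and established links are disconnected. If a reference was taken
 * again in the meantime, the work simply bails out.
 */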
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	if (atomic_read(&conn->refcnt))
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		hci_conn_disconnect(conn);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = __constant_cpu_to_le16(0);
		cp.min_remote_timeout = __constant_cpu_to_le16(0);
		cp.min_local_timeout = __constant_cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = __constant_cpu_to_le16(4);
		cp.timeout = __constant_cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

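/* Allocate and initialise a new hci_conn, pick the packet type mask for the
 * link type, hook up the timeout work and timers, add the connection to the
 * hash and take a reference on the hci_dev. Returns NULL on allocation
 * failure; the connection starts in BT_OPEN with a zero refcount.
 */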
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
			      __u16 pkt_type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (!pkt_type)
			pkt_type = SCO_ESCO_MASK;
		/* fall through */
	case ESCO_LINK:
		if (!pkt_type)
			pkt_type = ALL_ESCO_MASK;
		if (lmp_esco_capable(hdev)) {
			/* HCI Setup Synchronous Connection Command uses
			   reverse logic on the EDR_ESCO_MASK bits */
			conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) &
					 hdev->esco_type;
		} else {
			/* Legacy HCI Add Sco Connection Command uses a
			   shifted bitmask */
			conn->pkt_type = (pkt_type << 5) & hdev->pkt_type &
					 SCO_PTYPE_MASK;
		}
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
		    (unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}

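/* Tear down a connection object: stop its timers and work, return any
 * unacknowledged packet credits to the controller counters, detach a linked
 * SCO/ACL peer, flush its channels, remove it from the hash and drop the
 * hci_dev reference taken in hci_conn_add().
 */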
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	cancel_delayed_work_sync(&conn->disc_work);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);

	hci_conn_put(conn);

	return 0;
}

struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_RAW, &d->flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 * No source address - find interface with bdaddr != dst
		 * Source address - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

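/* Look up or create an outgoing LE connection. Fails with -ENOTSUPP when the
 * controller is configured as an LE peripheral, and with -EBUSY when another
 * LE connection attempt is already in progress, since only one LE Create
 * Connection can be outstanding at a time.
 */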
static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				       u8 dst_type, u8 sec_level, u8 auth_type)
{
	struct hci_conn *le;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
		return ERR_PTR(-ENOTSUPP);

	le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (!le) {
		le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (le)
			return ERR_PTR(-EBUSY);

		le = hci_conn_add(hdev, LE_LINK, 0, dst);
		if (!le)
			return ERR_PTR(-ENOMEM);

		le->dst_type = bdaddr_to_le(dst_type);
		hci_le_create_connection(le);
	}

	le->pending_sec_level = sec_level;
	le->auth_type = auth_type;

	hci_conn_hold(le);

	return le;
}

static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
					u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}

static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
					__u16 pkt_type, bdaddr_t *dst,
					u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, pkt_type, dst);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}

/* Create SCO, ACL or LE connection. */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
			     __u16 pkt_type, bdaddr_t *dst,
			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);

	switch (type) {
	case LE_LINK:
		return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
	case ACL_LINK:
		return hci_connect_acl(hdev, dst, sec_level, auth_type);
	case SCO_LINK:
	case ESCO_LINK:
		return hci_connect_sco(hdev, type, pkt_type, dst, sec_level, auth_type);
	}

	return ERR_PTR(-EINVAL);
}

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (conn->link_mode & HCI_LM_ENCRYPT)
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	if (sec_level != BT_SECURITY_HIGH)
		return 1; /* Accept if non-secure is required */

	if (conn->sec_level == BT_SECURITY_HIGH)
		return 1;

	return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}

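/* Helper for the HCIGETCONNLIST socket ioctl: copy up to req.conn_num
 * hci_conn_info entries for the requested device to userspace. conn_num is
 * bounded so userspace cannot trigger an oversized allocation.
 */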
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (c->type == SCO_LINK) {
			(ci + n)->mtu = hdev->sco_mtu;
			(ci + n)->cnt = hdev->sco_cnt;
			(ci + n)->pkts = hdev->sco_pkts;
		} else {
			(ci + n)->mtu = hdev->acl_mtu;
			(ci + n)->cnt = hdev->acl_cnt;
			(ci + n)->pkts = hdev->acl_pkts;
		}
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
		if (req.type == SCO_LINK) {
			ci.mtu = hdev->sco_mtu;
			ci.cnt = hdev->sco_cnt;
			ci.pkts = hdev->sco_pkts;
		} else {
			ci.mtu = hdev->acl_mtu;
			ci.cnt = hdev->acl_cnt;
			ci.pkts = hdev->acl_pkts;
		}
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

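/* An hci_chan represents a logical channel multiplexed over an hci_conn
 * (used by the L2CAP/AMP code). Channels live on conn->chan_list, which is
 * traversed under RCU, so additions use list_add_rcu() and hci_chan_del()
 * waits for a grace period before freeing the memory.
 */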
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = conn;
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	hci_conn_drop(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}

static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}

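/* Resolve a channel handle to its hci_chan by walking every connection on
 * the device; the walk runs under rcu_read_lock() and the per-connection
 * search is delegated to __hci_chan_lookup_handle().
 */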
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}