net/bluetooth/hci_conn.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/smp.h>

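/* Send an LE Create Connection command for an outgoing connection, using
 * fixed scan and connection parameters; the connection is marked as an
 * outgoing master link in BT_CONNECT state.
 */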
static void hci_le_create_connection(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_create_conn cp;

        conn->state = BT_CONNECT;
        conn->out = true;
        conn->link_mode |= HCI_LM_MASTER;
        conn->sec_level = BT_SECURITY_LOW;

        memset(&cp, 0, sizeof(cp));
        cp.scan_interval = __constant_cpu_to_le16(0x0060);
        cp.scan_window = __constant_cpu_to_le16(0x0030);
        bacpy(&cp.peer_addr, &conn->dst);
        cp.peer_addr_type = conn->dst_type;
        cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
        cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
        cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
        cp.min_ce_len = __constant_cpu_to_le16(0x0000);
        cp.max_ce_len = __constant_cpu_to_le16(0x0000);

        hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

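/* Initiate an outgoing BR/EDR ACL connection, seeding the Create Connection
 * command with cached inquiry data (page scan mode and clock offset) when a
 * recent inquiry entry for the peer is available.
 */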
static void hci_acl_create_connection(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->link_mode = HCI_LM_MASTER;

        conn->attempt++;

        conn->link_policy = hdev->link_policy;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        if (ie) {
                if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                        cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                        cp.pscan_mode = ie->data.pscan_mode;
                        cp.clock_offset = ie->data.clock_offset |
                                          __constant_cpu_to_le16(0x8000);
                }

                memcpy(conn->dev_class, ie->data.dev_class, 3);
                if (ie->data.ssp_mode > 0)
                        set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
        }

        cp.pkt_type = cpu_to_le16(conn->pkt_type);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
        struct hci_cp_create_conn_cancel cp;

        BT_DBG("hcon %p", conn);

        if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        bacpy(&cp.bdaddr, &conn->dst);
        hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

static void hci_reject_sco(struct hci_conn *conn)
{
        struct hci_cp_reject_sync_conn_req cp;

        cp.reason = HCI_ERROR_REMOTE_USER_TERM;
        bacpy(&cp.bdaddr, &conn->dst);

        hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}

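/* Request disconnection of an established link with the given reason and
 * move the connection into BT_DISCONN state.
 */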
void hci_disconnect(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconnect cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_DISCONN;

        cp.handle = cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconn_phy_link cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_DISCONN;

        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
                     sizeof(cp), &cp);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

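/* Set up a synchronous (eSCO) link on top of the ACL connection identified
 * by handle, using the packet types selected for conn.
 */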
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_setup_sync_conn cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
        cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
        cp.max_latency = __constant_cpu_to_le16(0xffff);
        cp.voice_setting = cpu_to_le16(hdev->voice_setting);
        cp.retrans_effort = 0xff;

        hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

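/* Ask the controller to update the parameters of an established LE
 * connection (interval range, slave latency and supervision timeout).
 */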
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                        u16 latency, u16 to_multiplier)
{
        struct hci_cp_le_conn_update cp;
        struct hci_dev *hdev = conn->hdev;

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        cp.conn_interval_min = cpu_to_le16(min);
        cp.conn_interval_max = cpu_to_le16(max);
        cp.conn_latency = cpu_to_le16(latency);
        cp.supervision_timeout = cpu_to_le16(to_multiplier);
        cp.min_ce_len = __constant_cpu_to_le16(0x0001);
        cp.max_ce_len = __constant_cpu_to_le16(0x0001);

        hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}

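/* Start link-layer encryption on an LE connection using the given EDIV,
 * random value and long term key.
 */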
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
                      __u8 ltk[16])
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_start_enc cp;

        BT_DBG("hcon %p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        memcpy(cp.ltk, ltk, sizeof(cp.ltk));
        cp.ediv = ediv;
        memcpy(cp.rand, rand, sizeof(cp.rand));

        hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
        struct hci_conn *sco = conn->link;

        if (!sco)
                return;

        BT_DBG("hcon %p", conn);

        if (!status) {
                if (lmp_esco_capable(conn->hdev))
                        hci_setup_sync(sco, conn->handle);
                else
                        hci_add_sco(sco, conn->handle);
        } else {
                hci_proto_connect_cfm(sco, status);
                hci_conn_del(sco);
        }
}

static void hci_conn_disconnect(struct hci_conn *conn)
{
        __u8 reason = hci_proto_disconn_ind(conn);

        switch (conn->type) {
        case AMP_LINK:
                hci_amp_disconn(conn, reason);
                break;
        default:
                hci_disconnect(conn, reason);
                break;
        }
}

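/* Deferred work run once a connection is no longer referenced: cancel an
 * outgoing connection attempt, reject a pending SCO request or disconnect
 * an established link, depending on the connection state.
 */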
static void hci_conn_timeout(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             disc_work.work);

        BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

        if (atomic_read(&conn->refcnt))
                return;

        switch (conn->state) {
        case BT_CONNECT:
        case BT_CONNECT2:
                if (conn->out) {
                        if (conn->type == ACL_LINK)
                                hci_acl_create_connection_cancel(conn);
                        else if (conn->type == LE_LINK)
                                hci_le_create_connection_cancel(conn);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        hci_reject_sco(conn);
                }
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
                hci_conn_disconnect(conn);
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("hcon %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_latency = __constant_cpu_to_le16(0);
                cp.min_remote_timeout = __constant_cpu_to_le16(0);
                cp.min_local_timeout = __constant_cpu_to_le16(0);
                hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
                struct hci_cp_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt = __constant_cpu_to_le16(4);
                cp.timeout = __constant_cpu_to_le16(1);
                hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
        }
}

static void hci_conn_idle(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;

        BT_DBG("hcon %p mode %d", conn, conn->mode);

        hci_conn_enter_sniff_mode(conn);
}

static void hci_conn_auto_accept(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;
        struct hci_dev *hdev = conn->hdev;

        hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
                     &conn->dst);
}

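/* Allocate and initialise a new hci_conn for the given peer address, add it
 * to the device's connection hash and register it with sysfs.
 */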
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %pMR", hdev->name, dst);

        conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        conn->hdev = hdev;
        conn->type = type;
        conn->mode = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;
        conn->io_capability = hdev->io_capability;
        conn->remote_auth = 0xff;
        conn->key_type = 0xff;

        set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;

        switch (type) {
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                         (hdev->esco_type & EDR_ESCO_MASK);
                else
                        conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
                break;
        case ESCO_LINK:
                conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
                break;
        }

        skb_queue_head_init(&conn->data_q);

        INIT_LIST_HEAD(&conn->chan_list);

        INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
        setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
        setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
                    (unsigned long) conn);

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        hci_conn_hash_add(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

        hci_conn_init_sysfs(conn);

        return conn;
}

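/* Tear down a connection: stop its timers, return unacked packet credits to
 * the controller counters, unlink an associated SCO/ACL peer and release all
 * references held by the connection.
 */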
int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

        del_timer(&conn->idle_timer);

        cancel_delayed_work_sync(&conn->disc_work);

        del_timer(&conn->auto_accept_timer);

        if (conn->type == ACL_LINK) {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
                if (hdev->le_pkts)
                        hdev->le_cnt += conn->sent;
                else
                        hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_drop(acl);
                }
        }

        hci_chan_list_flush(conn);

        if (conn->amp_mgr)
                amp_mgr_put(conn->amp_mgr);

        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

        skb_queue_purge(&conn->data_q);

        hci_conn_del_sysfs(conn);

        hci_dev_put(hdev);

        hci_conn_put(conn);

        return 0;
}

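/* Pick the local BR/EDR controller to use for reaching dst: the one whose
 * address matches src when a source address is given, otherwise the first
 * controller whose own address differs from dst.
 */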
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%pMR -> %pMR", src, dst);

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) ||
                    test_bit(HCI_RAW, &d->flags) ||
                    d->dev_type != HCI_BREDR)
                        continue;

                /* Simple routing:
                 * No source address - find interface with bdaddr != dst
                 * Source address - find interface with bdaddr == src
                 */

                if (use_src) {
                        if (!bacmp(&d->bdaddr, src)) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);

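/* Look up or create an LE connection to dst. Only one outgoing LE connection
 * attempt may be pending at a time, and none are started while the controller
 * is acting as an LE peripheral.
 */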
static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                       u8 dst_type, u8 sec_level, u8 auth_type)
{
        struct hci_conn *le;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
                return ERR_PTR(-ENOTSUPP);

        le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
        if (!le) {
                le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (le)
                        return ERR_PTR(-EBUSY);

                le = hci_conn_add(hdev, LE_LINK, dst);
                if (!le)
                        return ERR_PTR(-ENOMEM);

                le->dst_type = bdaddr_to_le(dst_type);
                hci_le_create_connection(le);
        }

        le->pending_sec_level = sec_level;
        le->auth_type = auth_type;

        hci_conn_hold(le);

        return le;
}

static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
                                        u8 sec_level, u8 auth_type)
{
        struct hci_conn *acl;

        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst);
                if (!acl)
                        return ERR_PTR(-ENOMEM);
        }

        hci_conn_hold(acl);

        if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
                acl->sec_level = BT_SECURITY_LOW;
                acl->pending_sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_create_connection(acl);
        }

        return acl;
}

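/* Create a SCO or eSCO connection: bring up (or reuse) the underlying ACL
 * link first; the synchronous link is set up once the ACL is connected and
 * in active mode.
 */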
static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
                                        bdaddr_t *dst, u8 sec_level, u8 auth_type)
{
        struct hci_conn *acl;
        struct hci_conn *sco;

        acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
        if (IS_ERR(acl))
                return acl;

        sco = hci_conn_hash_lookup_ba(hdev, type, dst);
        if (!sco) {
                sco = hci_conn_add(hdev, type, dst);
                if (!sco) {
                        hci_conn_drop(acl);
                        return ERR_PTR(-ENOMEM);
                }
        }

        acl->link = sco;
        sco->link = acl;

        hci_conn_hold(sco);

        if (acl->state == BT_CONNECTED &&
            (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
                set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
                hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

                if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
                        /* defer SCO setup until mode change completed */
                        set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
                        return sco;
                }

                hci_sco_setup(acl, 0x00);
        }

        return sco;
}

/* Create SCO, ACL or LE connection. */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                             __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);

        switch (type) {
        case LE_LINK:
                return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
        case ACL_LINK:
                return hci_connect_acl(hdev, dst, sec_level, auth_type);
        case SCO_LINK:
        case ESCO_LINK:
                return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
        }

        return ERR_PTR(-EINVAL);
}

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
        BT_DBG("hcon %p", conn);

        if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
                return 0;

        return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("hcon %p", conn);

        if (conn->pending_sec_level > sec_level)
                sec_level = conn->pending_sec_level;

        if (sec_level > conn->sec_level)
                conn->pending_sec_level = sec_level;
        else if (conn->link_mode & HCI_LM_AUTH)
                return 1;

        /* Make sure we preserve an existing MITM requirement */
        auth_type |= (conn->auth_type & 0x01);

        conn->auth_type = auth_type;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_auth_requested cp;

                /* encrypt must be pending if auth is also pending */
                set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                             sizeof(cp), &cp);
                if (conn->key_type != 0xff)
                        set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
        }

        return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
        BT_DBG("hcon %p", conn);

        if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.encrypt = 0x01;
                hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
                             &cp);
        }
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("hcon %p", conn);

        if (conn->type == LE_LINK)
                return smp_conn_security(conn, sec_level);

        /* For sdp we don't need the link key. */
        if (sec_level == BT_SECURITY_SDP)
                return 1;

        /* For non 2.1 devices and low security level we don't need the link
           key. */
        if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
                return 1;

        /* For other security levels we need the link key. */
        if (!(conn->link_mode & HCI_LM_AUTH))
                goto auth;

        /* An authenticated combination key has sufficient security for any
           security level. */
        if (conn->key_type == HCI_LK_AUTH_COMBINATION)
                goto encrypt;

        /* An unauthenticated combination key has sufficient security for
           security level 1 and 2. */
        if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
            (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
                goto encrypt;

        /* A combination key has always sufficient security for the security
           levels 1 or 2. High security level requires the combination key
           is generated using maximum PIN code length (16).
           For pre 2.1 units. */
        if (conn->key_type == HCI_LK_COMBINATION &&
            (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
                goto encrypt;

auth:
        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
                return 0;

        if (!hci_conn_auth(conn, sec_level, auth_type))
                return 0;

encrypt:
        if (conn->link_mode & HCI_LM_ENCRYPT)
                return 1;

        hci_conn_encrypt(conn);
        return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
        BT_DBG("hcon %p", conn);

        if (sec_level != BT_SECURITY_HIGH)
                return 1; /* Accept if non-secure is required */

        if (conn->sec_level == BT_SECURITY_HIGH)
                return 1;

        return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
        BT_DBG("hcon %p", conn);

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_change_conn_link_key cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
                             sizeof(cp), &cp);
        }

        return 0;
}

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
        BT_DBG("hcon %p", conn);

        if (!role && conn->link_mode & HCI_LM_MASTER)
                return 1;

        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
                struct hci_cp_switch_role cp;
                bacpy(&cp.bdaddr, &conn->dst);
                cp.role = role;
                hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("hcon %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (conn->mode != HCI_CM_SNIFF)
                goto timer;

        if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
                goto timer;

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
                struct hci_cp_exit_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }

timer:
        if (hdev->idle_timeout > 0)
                mod_timer(&conn->idle_timer,
                          jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c, *n;

        BT_DBG("hdev %s", hdev->name);

        list_for_each_entry_safe(c, n, &h->list, list) {
                c->state = BT_CLOSED;

                hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
                hci_conn_del(c);
        }
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
        struct hci_conn *conn;

        BT_DBG("hdev %s", hdev->name);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
        if (conn)
                hci_acl_create_connection(conn);

        hci_dev_unlock(hdev);
}

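/* Copy information about active connections on the requested device to user
 * space, up to req.conn_num entries.
 */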
int hci_get_conn_list(void __user *arg)
{
        struct hci_conn *c;
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
        int n = 0, size, err;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
                return -EINVAL;

        size = sizeof(req) + req.conn_num * sizeof(*ci);

        cl = kmalloc(size, GFP_KERNEL);
        if (!cl)
                return -ENOMEM;

        hdev = hci_dev_get(req.dev_id);
        if (!hdev) {
                kfree(cl);
                return -ENODEV;
        }

        ci = cl->conn_info;

        hci_dev_lock(hdev);
        list_for_each_entry(c, &hdev->conn_hash.list, list) {
                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type = c->type;
                (ci + n)->out = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
                if (++n >= req.conn_num)
                        break;
        }
        hci_dev_unlock(hdev);

        cl->dev_id = hdev->id;
        cl->conn_num = n;
        size = sizeof(req) + n * sizeof(*ci);

        hci_dev_put(hdev);

        err = copy_to_user(arg, cl, size);
        kfree(cl);

        return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_conn_info_req req;
        struct hci_conn_info ci;
        struct hci_conn *conn;
        char __user *ptr = arg + sizeof(req);

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
        if (conn) {
                bacpy(&ci.bdaddr, &conn->dst);
                ci.handle = conn->handle;
                ci.type = conn->type;
                ci.out = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
        }
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_auth_info_req req;
        struct hci_conn *conn;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
        if (conn)
                req.type = conn->auth_type;
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

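/* Allocate a new hci_chan on the given connection and add it to the
 * connection's channel list.
 */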
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_chan *chan;

        BT_DBG("%s hcon %p", hdev->name, conn);

        chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
        if (!chan)
                return NULL;

        chan->conn = conn;
        skb_queue_head_init(&chan->data_q);
        chan->state = BT_CONNECTED;

        list_add_rcu(&chan->list, &conn->chan_list);

        return chan;
}

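/* Remove a channel from its connection, wait for RCU readers to finish, drop
 * the connection reference it held and free the channel.
 */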
void hci_chan_del(struct hci_chan *chan)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

        list_del_rcu(&chan->list);

        synchronize_rcu();

        hci_conn_drop(conn);

        skb_queue_purge(&chan->data_q);
        kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
        struct hci_chan *chan, *n;

        BT_DBG("hcon %p", conn);

        list_for_each_entry_safe(chan, n, &conn->chan_list, list)
                hci_chan_del(chan);
}

static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
                                                 __u16 handle)
{
        struct hci_chan *hchan;

        list_for_each_entry(hchan, &hcon->chan_list, list) {
                if (hchan->handle == handle)
                        return hchan;
        }

        return NULL;
}

struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *hcon;
        struct hci_chan *hchan = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(hcon, &h->list, list) {
                hchan = __hci_chan_lookup_handle(hcon, handle);
                if (hchan)
                        break;
        }

        rcu_read_unlock();

        return hchan;
}