/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
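/* Usage sketch (illustrative only, not part of this file): a driver's
 * setup() callback can use __hci_cmd_sync() to issue a single command
 * and consume its Command Complete response. The vendor opcode 0xfc0f
 * and the function name below are hypothetical.
 *
 *	static int example_setup(struct hci_dev *hdev)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = __hci_cmd_sync(hdev, 0xfc0f, 0, NULL,
 *				     HCI_INIT_TIMEOUT);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		(skb->data now holds the command's return parameters)
 *
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */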
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
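/* The pattern used throughout this file: callers hand hci_req_sync() a
 * builder function that queues one or more commands with hci_req_add(),
 * then block until the last queued command completes. A minimal sketch
 * (the builder name is hypothetical; hci_scan_req() further below is a
 * real in-tree example):
 *
 *	static void example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 mode = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &mode);
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, SCAN_PAGE, HCI_INIT_TIMEOUT);
 */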
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
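/* For reference, assuming the Core specification numbering: event mask
 * bit n lives in events[n / 8] as the value 1 << (n % 8). For example,
 * Synchronous Connection Complete is event mask bit 43, which is why it
 * is set above as events[5] |= 0x08 (43 / 8 == 5, 1 << (43 % 8) == 0x08).
 */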
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
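/* Usage sketch (illustrative only): inquiry-result event handlers are
 * expected to feed each response into the cache roughly like this, with
 * data filled in from the event; the variable names are hypothetical.
 *
 *	bool name_known, ssp;
 *
 *	name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
 *
 * The return value tells the caller whether the remote name is already
 * known, i.e. whether a name-resolution step can be skipped.
 */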
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
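/* The buffer built above follows the advertising data format: a sequence
 * of (length, type, value) structures, where length counts the type byte
 * plus the value bytes. The flags entry written above, for instance,
 * occupies three bytes:
 *
 *	ptr[0] = 2;          length: type byte + one byte of flags
 *	ptr[1] = EIR_FLAGS;  AD type
 *	ptr[2] = flags;      value
 */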
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
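/* Note on HCISETACLMTU/HCISETSCOMTU above: dr.dev_opt is a single __u32
 * carrying two __u16 values, read back via pointer arithmetic. On a
 * little-endian machine that means userspace packs it as below (a
 * sketch; the variable names are hypothetical):
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = hdev_id;
 *	dr.dev_opt = ((__u32) mtu << 16) | pkts;
 *	ioctl(hci_sock, HCISETACLMTU, &dr);
 */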
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1690
c9839a11 1691struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1692{
c9839a11 1693 struct smp_ltk *k;
75d262c2 1694
c9839a11
VCG
1695 list_for_each_entry(k, &hdev->long_term_keys, list) {
1696 if (k->ediv != ediv ||
a8c5fb1a 1697 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1698 continue;
1699
c9839a11 1700 return k;
75d262c2
VCG
1701 }
1702
1703 return NULL;
1704}
75d262c2 1705
c9839a11 1706struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1707 u8 addr_type)
75d262c2 1708{
c9839a11 1709 struct smp_ltk *k;
75d262c2 1710
c9839a11
VCG
1711 list_for_each_entry(k, &hdev->long_term_keys, list)
1712 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1713 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1714 return k;
1715
1716 return NULL;
1717}
75d262c2 1718
d25e28ab 1719int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1720 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1721{
1722 struct link_key *key, *old_key;
745c0ce3
VA
1723 u8 old_key_type;
1724 bool persistent;
55ed8ca1
JH
1725
1726 old_key = hci_find_link_key(hdev, bdaddr);
1727 if (old_key) {
1728 old_key_type = old_key->type;
1729 key = old_key;
1730 } else {
12adcf3a 1731 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1732 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1733 if (!key)
1734 return -ENOMEM;
1735 list_add(&key->list, &hdev->link_keys);
1736 }
1737
6ed93dc6 1738 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1739
d25e28ab
JH
1740 /* Some buggy controller combinations generate a changed
1741 * combination key for legacy pairing even when there's no
1742 * previous key */
1743 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1744 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1745 type = HCI_LK_COMBINATION;
655fe6ec
JH
1746 if (conn)
1747 conn->key_type = type;
1748 }
d25e28ab 1749
55ed8ca1 1750 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1751 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1752 key->pin_len = pin_len;
1753
b6020ba0 1754 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1755 key->type = old_key_type;
4748fed2
JH
1756 else
1757 key->type = type;
1758
4df378a1
JH
1759 if (!new_key)
1760 return 0;
1761
1762 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1763
744cf19e 1764 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1765
6ec5bcad
VA
1766 if (conn)
1767 conn->flush_key = !persistent;
55ed8ca1
JH
1768
1769 return 0;
1770}
1771
c9839a11 1772int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1773 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1774 ediv, u8 rand[8])
75d262c2 1775{
c9839a11 1776 struct smp_ltk *key, *old_key;
75d262c2 1777
c9839a11
VCG
1778 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1779 return 0;
75d262c2 1780
c9839a11
VCG
1781 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1782 if (old_key)
75d262c2 1783 key = old_key;
c9839a11
VCG
1784 else {
1785 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1786 if (!key)
1787 return -ENOMEM;
c9839a11 1788 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1789 }
1790
75d262c2 1791 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1792 key->bdaddr_type = addr_type;
1793 memcpy(key->val, tk, sizeof(key->val));
1794 key->authenticated = authenticated;
1795 key->ediv = ediv;
1796 key->enc_size = enc_size;
1797 key->type = type;
1798 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1799
c9839a11
VCG
1800 if (!new_key)
1801 return 0;
75d262c2 1802
261cc5aa
VCG
1803 if (type & HCI_SMP_LTK)
1804 mgmt_new_ltk(hdev, key, 1);
1805
75d262c2
VCG
1806 return 0;
1807}
1808
55ed8ca1
JH
1809int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1810{
1811 struct link_key *key;
1812
1813 key = hci_find_link_key(hdev, bdaddr);
1814 if (!key)
1815 return -ENOENT;
1816
6ed93dc6 1817 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1818
1819 list_del(&key->list);
1820 kfree(key);
1821
1822 return 0;
1823}
1824
b899efaf
VCG
1825int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1826{
1827 struct smp_ltk *k, *tmp;
1828
1829 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1830 if (bacmp(bdaddr, &k->bdaddr))
1831 continue;
1832
6ed93dc6 1833 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1834
1835 list_del(&k->list);
1836 kfree(k);
1837 }
1838
1839 return 0;
1840}
1841
6bd32326 1842/* HCI command timer function */
bda4f23a 1843static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1844{
1845 struct hci_dev *hdev = (void *) arg;
1846
bda4f23a
AE
1847 if (hdev->sent_cmd) {
1848 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1849 u16 opcode = __le16_to_cpu(sent->opcode);
1850
1851 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1852 } else {
1853 BT_ERR("%s command tx timeout", hdev->name);
1854 }
1855
6bd32326 1856 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1857 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1858}
1859
2763eda6 1860struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1861 bdaddr_t *bdaddr)
2763eda6
SJ
1862{
1863 struct oob_data *data;
1864
1865 list_for_each_entry(data, &hdev->remote_oob_data, list)
1866 if (bacmp(bdaddr, &data->bdaddr) == 0)
1867 return data;
1868
1869 return NULL;
1870}
1871
1872int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1873{
1874 struct oob_data *data;
1875
1876 data = hci_find_remote_oob_data(hdev, bdaddr);
1877 if (!data)
1878 return -ENOENT;
1879
6ed93dc6 1880 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1881
1882 list_del(&data->list);
1883 kfree(data);
1884
1885 return 0;
1886}
1887
1888int hci_remote_oob_data_clear(struct hci_dev *hdev)
1889{
1890 struct oob_data *data, *n;
1891
1892 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1893 list_del(&data->list);
1894 kfree(data);
1895 }
1896
1897 return 0;
1898}
1899
1900int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1901 u8 *randomizer)
2763eda6
SJ
1902{
1903 struct oob_data *data;
1904
1905 data = hci_find_remote_oob_data(hdev, bdaddr);
1906
1907 if (!data) {
1908 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1909 if (!data)
1910 return -ENOMEM;
1911
1912 bacpy(&data->bdaddr, bdaddr);
1913 list_add(&data->list, &hdev->remote_oob_data);
1914 }
1915
1916 memcpy(data->hash, hash, sizeof(data->hash));
1917 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1918
6ed93dc6 1919 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1920
1921 return 0;
1922}
1923
04124681 1924struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1925{
8035ded4 1926 struct bdaddr_list *b;
b2a66aad 1927
8035ded4 1928 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1929 if (bacmp(bdaddr, &b->bdaddr) == 0)
1930 return b;
b2a66aad
AJ
1931
1932 return NULL;
1933}
1934
1935int hci_blacklist_clear(struct hci_dev *hdev)
1936{
1937 struct list_head *p, *n;
1938
1939 list_for_each_safe(p, n, &hdev->blacklist) {
1940 struct bdaddr_list *b;
1941
1942 b = list_entry(p, struct bdaddr_list, list);
1943
1944 list_del(p);
1945 kfree(b);
1946 }
1947
1948 return 0;
1949}
1950
88c1fe4b 1951int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1952{
1953 struct bdaddr_list *entry;
b2a66aad
AJ
1954
1955 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1956 return -EBADF;
1957
5e762444
AJ
1958 if (hci_blacklist_lookup(hdev, bdaddr))
1959 return -EEXIST;
b2a66aad
AJ
1960
1961 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1962 if (!entry)
1963 return -ENOMEM;
b2a66aad
AJ
1964
1965 bacpy(&entry->bdaddr, bdaddr);
1966
1967 list_add(&entry->list, &hdev->blacklist);
1968
88c1fe4b 1969 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1970}
1971
88c1fe4b 1972int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1973{
1974 struct bdaddr_list *entry;
b2a66aad 1975
1ec918ce 1976 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1977 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1978
1979 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1980 if (!entry)
5e762444 1981 return -ENOENT;
b2a66aad
AJ
1982
1983 list_del(&entry->list);
1984 kfree(entry);
1985
88c1fe4b 1986 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1987}
1988
42c6b129 1989static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1990{
1991 struct le_scan_params *param = (struct le_scan_params *) opt;
1992 struct hci_cp_le_set_scan_param cp;
1993
1994 memset(&cp, 0, sizeof(cp));
1995 cp.type = param->type;
1996 cp.interval = cpu_to_le16(param->interval);
1997 cp.window = cpu_to_le16(param->window);
1998
42c6b129 1999 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
2000}
2001
42c6b129 2002static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
2003{
2004 struct hci_cp_le_set_scan_enable cp;
2005
2006 memset(&cp, 0, sizeof(cp));
76a388be 2007 cp.enable = LE_SCAN_ENABLE;
525e296a 2008 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
7ba8b4be 2009
42c6b129 2010 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2011}
2012
2013static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 2014 u16 window, int timeout)
2015{
2016 long timeo = msecs_to_jiffies(3000);
2017 struct le_scan_params param;
2018 int err;
2019
2020 BT_DBG("%s", hdev->name);
2021
2022 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2023 return -EINPROGRESS;
2024
2025 param.type = type;
2026 param.interval = interval;
2027 param.window = window;
2028
2029 hci_req_lock(hdev);
2030
2031 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2032 timeo);
7ba8b4be 2033 if (!err)
01178cd4 2034 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2035
2036 hci_req_unlock(hdev);
2037
2038 if (err < 0)
2039 return err;
2040
46818ed5 2041 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
b6c7515a 2042 timeout);
2043
2044 return 0;
2045}
2046
2047int hci_cancel_le_scan(struct hci_dev *hdev)
2048{
2049 BT_DBG("%s", hdev->name);
2050
2051 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2052 return -EALREADY;
2053
2054 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2055 struct hci_cp_le_set_scan_enable cp;
2056
2057 /* Send HCI command to disable LE Scan */
2058 memset(&cp, 0, sizeof(cp));
2059 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2060 }
2061
2062 return 0;
2063}
2064
2065static void le_scan_disable_work(struct work_struct *work)
2066{
2067 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2068 le_scan_disable.work);
2069 struct hci_cp_le_set_scan_enable cp;
2070
2071 BT_DBG("%s", hdev->name);
2072
2073 memset(&cp, 0, sizeof(cp));
2074
2075 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2076}
2077
2078static void le_scan_work(struct work_struct *work)
2079{
2080 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2081 struct le_scan_params *param = &hdev->le_scan_params;
2082
2083 BT_DBG("%s", hdev->name);
2084
2085 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2086 param->timeout);
2087}
2088
2089int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 2090 int timeout)
2091{
2092 struct le_scan_params *param = &hdev->le_scan_params;
2093
2094 BT_DBG("%s", hdev->name);
2095
2096 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2097 return -ENOTSUPP;
2098
2099 if (work_busy(&hdev->le_scan))
2100 return -EINPROGRESS;
2101
2102 param->type = type;
2103 param->interval = interval;
2104 param->window = window;
2105 param->timeout = timeout;
2106
2107 queue_work(system_long_wq, &hdev->le_scan);
2108
2109 return 0;
2110}
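
/* Usage sketch (illustrative): discovery starts a scan with interval and
 * window in 0.625 ms units and a timeout in jiffies (it is passed straight
 * to queue_delayed_work() above), e.g.:
 *
 *	err = hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x0012, 0x0012,
 *			  msecs_to_jiffies(10240));
 */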
2111
2112/* Alloc HCI device */
2113struct hci_dev *hci_alloc_dev(void)
2114{
2115 struct hci_dev *hdev;
2116
2117 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2118 if (!hdev)
2119 return NULL;
2120
2121 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2122 hdev->esco_type = (ESCO_HV1);
2123 hdev->link_mode = (HCI_LM_ACCEPT);
2124 hdev->io_capability = 0x03; /* No Input No Output */
2125 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2126 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 2127
2128 hdev->sniff_max_interval = 800;
2129 hdev->sniff_min_interval = 80;
2130
2131 mutex_init(&hdev->lock);
2132 mutex_init(&hdev->req_lock);
2133
2134 INIT_LIST_HEAD(&hdev->mgmt_pending);
2135 INIT_LIST_HEAD(&hdev->blacklist);
2136 INIT_LIST_HEAD(&hdev->uuids);
2137 INIT_LIST_HEAD(&hdev->link_keys);
2138 INIT_LIST_HEAD(&hdev->long_term_keys);
2139 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 2140 INIT_LIST_HEAD(&hdev->conn_hash.list);
2141
2142 INIT_WORK(&hdev->rx_work, hci_rx_work);
2143 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2144 INIT_WORK(&hdev->tx_work, hci_tx_work);
2145 INIT_WORK(&hdev->power_on, hci_power_on);
2146 INIT_WORK(&hdev->le_scan, le_scan_work);
2147
2148 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2149 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2150 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2151
2152 skb_queue_head_init(&hdev->rx_q);
2153 skb_queue_head_init(&hdev->cmd_q);
2154 skb_queue_head_init(&hdev->raw_q);
2155
2156 init_waitqueue_head(&hdev->req_wait_q);
2157
bda4f23a 2158 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 2159
2160 hci_init_sysfs(hdev);
2161 discovery_init(hdev);
2162
2163 return hdev;
2164}
2165EXPORT_SYMBOL(hci_alloc_dev);
2166
2167/* Free HCI device */
2168void hci_free_dev(struct hci_dev *hdev)
2169{
2170 /* will be freed via the device release callback */
2171 put_device(&hdev->dev);
2172}
2173EXPORT_SYMBOL(hci_free_dev);
2174
2175/* Register HCI device */
2176int hci_register_dev(struct hci_dev *hdev)
2177{
b1b813d4 2178 int id, error;
1da177e4 2179
010666a1 2180 if (!hdev->open || !hdev->close)
2181 return -EINVAL;
2182
2183 /* Do not allow HCI_AMP devices to register at index 0,
2184 * so the index can be used as the AMP controller ID.
2185 */
2186 switch (hdev->dev_type) {
2187 case HCI_BREDR:
2188 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2189 break;
2190 case HCI_AMP:
2191 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2192 break;
2193 default:
2194 return -EINVAL;
1da177e4 2195 }
8e87d142 2196
2197 if (id < 0)
2198 return id;
2199
2200 sprintf(hdev->name, "hci%d", id);
2201 hdev->id = id;
2202
2203 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2204
2205 write_lock(&hci_dev_list_lock);
2206 list_add(&hdev->list, &hci_dev_list);
f20d09d5 2207 write_unlock(&hci_dev_list_lock);
1da177e4 2208
32845eb1 2209 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
a8c5fb1a 2210 WQ_MEM_RECLAIM, 1);
2211 if (!hdev->workqueue) {
2212 error = -ENOMEM;
2213 goto err;
2214 }
f48fd9c8 2215
2216 hdev->req_workqueue = alloc_workqueue(hdev->name,
2217 WQ_HIGHPRI | WQ_UNBOUND |
2218 WQ_MEM_RECLAIM, 1);
2219 if (!hdev->req_workqueue) {
2220 destroy_workqueue(hdev->workqueue);
2221 error = -ENOMEM;
2222 goto err;
2223 }
2224
2225 error = hci_add_sysfs(hdev);
2226 if (error < 0)
2227 goto err_wqueue;
1da177e4 2228
611b30f7 2229 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2230 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2231 hdev);
2232 if (hdev->rfkill) {
2233 if (rfkill_register(hdev->rfkill) < 0) {
2234 rfkill_destroy(hdev->rfkill);
2235 hdev->rfkill = NULL;
2236 }
2237 }
2238
a8b2d5c2 2239 set_bit(HCI_SETUP, &hdev->dev_flags);
2240
2241 if (hdev->dev_type != HCI_AMP)
2242 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2243
1da177e4 2244 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2245 hci_dev_hold(hdev);
1da177e4 2246
19202573 2247 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2248
1da177e4 2249 return id;
f48fd9c8 2250
2251err_wqueue:
2252 destroy_workqueue(hdev->workqueue);
6ead1bbc 2253 destroy_workqueue(hdev->req_workqueue);
33ca954d 2254err:
3df92b31 2255 ida_simple_remove(&hci_index_ida, hdev->id);
f20d09d5 2256 write_lock(&hci_dev_list_lock);
f48fd9c8 2257 list_del(&hdev->list);
f20d09d5 2258 write_unlock(&hci_dev_list_lock);
f48fd9c8 2259
33ca954d 2260 return error;
2261}
2262EXPORT_SYMBOL(hci_register_dev);
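
/* Usage sketch (illustrative): a transport driver's probe path typically
 * pairs hci_alloc_dev() with hci_register_dev() and unwinds with
 * hci_free_dev() on failure; my_open/my_close/my_send stand in for
 * driver-provided callbacks:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */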
2263
2264/* Unregister HCI device */
59735631 2265void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 2266{
3df92b31 2267 int i, id;
ef222013 2268
c13854ce 2269 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 2270
2271 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2272
2273 id = hdev->id;
2274
f20d09d5 2275 write_lock(&hci_dev_list_lock);
1da177e4 2276 list_del(&hdev->list);
f20d09d5 2277 write_unlock(&hci_dev_list_lock);
2278
2279 hci_dev_do_close(hdev);
2280
cd4c5391 2281 for (i = 0; i < NUM_REASSEMBLY; i++)
2282 kfree_skb(hdev->reassembly[i]);
2283
2284 cancel_work_sync(&hdev->power_on);
2285
ab81cbf9 2286 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 2287 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 2288 hci_dev_lock(hdev);
744cf19e 2289 mgmt_index_removed(hdev);
09fd0de5 2290 hci_dev_unlock(hdev);
56e5cb86 2291 }
ab81cbf9 2292
2293 /* mgmt_index_removed should take care of emptying the
2294 * pending list */
2295 BUG_ON(!list_empty(&hdev->mgmt_pending));
2296
2297 hci_notify(hdev, HCI_DEV_UNREG);
2298
2299 if (hdev->rfkill) {
2300 rfkill_unregister(hdev->rfkill);
2301 rfkill_destroy(hdev->rfkill);
2302 }
2303
ce242970 2304 hci_del_sysfs(hdev);
147e2d59 2305
f48fd9c8 2306 destroy_workqueue(hdev->workqueue);
6ead1bbc 2307 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 2308
09fd0de5 2309 hci_dev_lock(hdev);
e2e0cacb 2310 hci_blacklist_clear(hdev);
2aeb9a1a 2311 hci_uuids_clear(hdev);
55ed8ca1 2312 hci_link_keys_clear(hdev);
b899efaf 2313 hci_smp_ltks_clear(hdev);
2763eda6 2314 hci_remote_oob_data_clear(hdev);
09fd0de5 2315 hci_dev_unlock(hdev);
e2e0cacb 2316
dc946bd8 2317 hci_dev_put(hdev);
2318
2319 ida_simple_remove(&hci_index_ida, id);
2320}
2321EXPORT_SYMBOL(hci_unregister_dev);
2322
2323/* Suspend HCI device */
2324int hci_suspend_dev(struct hci_dev *hdev)
2325{
2326 hci_notify(hdev, HCI_DEV_SUSPEND);
2327 return 0;
2328}
2329EXPORT_SYMBOL(hci_suspend_dev);
2330
2331/* Resume HCI device */
2332int hci_resume_dev(struct hci_dev *hdev)
2333{
2334 hci_notify(hdev, HCI_DEV_RESUME);
2335 return 0;
2336}
2337EXPORT_SYMBOL(hci_resume_dev);
2338
2339/* Receive frame from HCI drivers */
2340int hci_recv_frame(struct sk_buff *skb)
2341{
2342 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2343 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2344 && !test_bit(HCI_INIT, &hdev->flags))) {
2345 kfree_skb(skb);
2346 return -ENXIO;
2347 }
2348
d82603c6 2349 /* Incoming skb */
2350 bt_cb(skb)->incoming = 1;
2351
2352 /* Time stamp */
2353 __net_timestamp(skb);
2354
76bca880 2355 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2356 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2357
2358 return 0;
2359}
2360EXPORT_SYMBOL(hci_recv_frame);
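
/* Usage sketch (illustrative): a driver holding a complete frame tags the
 * packet type and owning device before handing it up:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */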
2361
33e882a5 2362static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2363 int count, __u8 index)
2364{
2365 int len = 0;
2366 int hlen = 0;
2367 int remain = count;
2368 struct sk_buff *skb;
2369 struct bt_skb_cb *scb;
2370
2371 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2372 index >= NUM_REASSEMBLY)
2373 return -EILSEQ;
2374
2375 skb = hdev->reassembly[index];
2376
2377 if (!skb) {
2378 switch (type) {
2379 case HCI_ACLDATA_PKT:
2380 len = HCI_MAX_FRAME_SIZE;
2381 hlen = HCI_ACL_HDR_SIZE;
2382 break;
2383 case HCI_EVENT_PKT:
2384 len = HCI_MAX_EVENT_SIZE;
2385 hlen = HCI_EVENT_HDR_SIZE;
2386 break;
2387 case HCI_SCODATA_PKT:
2388 len = HCI_MAX_SCO_SIZE;
2389 hlen = HCI_SCO_HDR_SIZE;
2390 break;
2391 }
2392
1e429f38 2393 skb = bt_skb_alloc(len, GFP_ATOMIC);
2394 if (!skb)
2395 return -ENOMEM;
2396
2397 scb = (void *) skb->cb;
2398 scb->expect = hlen;
2399 scb->pkt_type = type;
2400
2401 skb->dev = (void *) hdev;
2402 hdev->reassembly[index] = skb;
2403 }
2404
2405 while (count) {
2406 scb = (void *) skb->cb;
89bb46d0 2407 len = min_t(uint, scb->expect, count);
2408
2409 memcpy(skb_put(skb, len), data, len);
2410
2411 count -= len;
2412 data += len;
2413 scb->expect -= len;
2414 remain = count;
2415
2416 switch (type) {
2417 case HCI_EVENT_PKT:
2418 if (skb->len == HCI_EVENT_HDR_SIZE) {
2419 struct hci_event_hdr *h = hci_event_hdr(skb);
2420 scb->expect = h->plen;
2421
2422 if (skb_tailroom(skb) < scb->expect) {
2423 kfree_skb(skb);
2424 hdev->reassembly[index] = NULL;
2425 return -ENOMEM;
2426 }
2427 }
2428 break;
2429
2430 case HCI_ACLDATA_PKT:
2431 if (skb->len == HCI_ACL_HDR_SIZE) {
2432 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2433 scb->expect = __le16_to_cpu(h->dlen);
2434
2435 if (skb_tailroom(skb) < scb->expect) {
2436 kfree_skb(skb);
2437 hdev->reassembly[index] = NULL;
2438 return -ENOMEM;
2439 }
2440 }
2441 break;
2442
2443 case HCI_SCODATA_PKT:
2444 if (skb->len == HCI_SCO_HDR_SIZE) {
2445 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2446 scb->expect = h->dlen;
2447
2448 if (skb_tailroom(skb) < scb->expect) {
2449 kfree_skb(skb);
2450 hdev->reassembly[index] = NULL;
2451 return -ENOMEM;
2452 }
2453 }
2454 break;
2455 }
2456
2457 if (scb->expect == 0) {
2458 /* Complete frame */
2459
2460 bt_cb(skb)->pkt_type = type;
2461 hci_recv_frame(skb);
2462
2463 hdev->reassembly[index] = NULL;
2464 return remain;
2465 }
2466 }
2467
2468 return remain;
2469}
2470
2471int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2472{
2473 int rem = 0;
2474
2475 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2476 return -EILSEQ;
2477
da5f6c37 2478 while (count) {
1e429f38 2479 rem = hci_reassembly(hdev, type, data, count, type - 1);
2480 if (rem < 0)
2481 return rem;
ef222013 2482
2483 data += (count - rem);
2484 count = rem;
f81c6224 2485 }
ef222013 2486
f39a3c06 2487 return rem;
2488}
2489EXPORT_SYMBOL(hci_recv_fragment);
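
/* Usage sketch (illustrative): a driver receiving 'len' bytes of a packet
 * body in arbitrary chunks can push each chunk as it arrives; the packet
 * type selects the per-type reassembly slot:
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("Frame reassembly failed (%d)", err);
 */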
2490
2491#define STREAM_REASSEMBLY 0
2492
2493int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2494{
2495 int type;
2496 int rem = 0;
2497
da5f6c37 2498 while (count) {
2499 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2500
2501 if (!skb) {
2502 struct { char type; } *pkt;
2503
2504 /* Start of the frame */
2505 pkt = data;
2506 type = pkt->type;
2507
2508 data++;
2509 count--;
2510 } else
2511 type = bt_cb(skb)->pkt_type;
2512
1e429f38 2513 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2514 STREAM_REASSEMBLY);
2515 if (rem < 0)
2516 return rem;
2517
2518 data += (count - rem);
2519 count = rem;
f81c6224 2520 }
2521
2522 return rem;
2523}
2524EXPORT_SYMBOL(hci_recv_stream_fragment);
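
/* Usage sketch (illustrative): for H4-style byte streams, where each frame
 * is prefixed with its packet type byte, the raw stream can be fed in
 * directly:
 *
 *	err = hci_recv_stream_fragment(hdev, buf, len);
 */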
2525
2526/* ---- Interface to upper protocols ---- */
2527
2528int hci_register_cb(struct hci_cb *cb)
2529{
2530 BT_DBG("%p name %s", cb, cb->name);
2531
f20d09d5 2532 write_lock(&hci_cb_list_lock);
1da177e4 2533 list_add(&cb->list, &hci_cb_list);
f20d09d5 2534 write_unlock(&hci_cb_list_lock);
2535
2536 return 0;
2537}
2538EXPORT_SYMBOL(hci_register_cb);
2539
2540int hci_unregister_cb(struct hci_cb *cb)
2541{
2542 BT_DBG("%p name %s", cb, cb->name);
2543
f20d09d5 2544 write_lock(&hci_cb_list_lock);
1da177e4 2545 list_del(&cb->list);
f20d09d5 2546 write_unlock(&hci_cb_list_lock);
2547
2548 return 0;
2549}
2550EXPORT_SYMBOL(hci_unregister_cb);
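
/* Usage sketch (illustrative): upper protocols register a struct hci_cb to
 * receive connection callbacks; my_proto_cb is a hypothetical example and
 * only .name is shown:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */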
2551
2552static int hci_send_frame(struct sk_buff *skb)
2553{
2554 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2555
2556 if (!hdev) {
2557 kfree_skb(skb);
2558 return -ENODEV;
2559 }
2560
0d48d939 2561 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2562
2563 /* Time stamp */
2564 __net_timestamp(skb);
1da177e4 2565
2566 /* Send copy to monitor */
2567 hci_send_to_monitor(hdev, skb);
2568
2569 if (atomic_read(&hdev->promisc)) {
2570 /* Send copy to the sockets */
470fe1b5 2571 hci_send_to_sock(hdev, skb);
2572 }
2573
2574 /* Get rid of the skb owner prior to sending to the driver. */
2575 skb_orphan(skb);
2576
2577 return hdev->send(skb);
2578}
2579
2580void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2581{
2582 skb_queue_head_init(&req->cmd_q);
2583 req->hdev = hdev;
5d73e034 2584 req->err = 0;
2585}
2586
2587int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2588{
2589 struct hci_dev *hdev = req->hdev;
2590 struct sk_buff *skb;
2591 unsigned long flags;
2592
2593 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2594
2595 /* If an error occurred during request building, remove all HCI
2596 * commands queued on the HCI request queue.
2597 */
2598 if (req->err) {
2599 skb_queue_purge(&req->cmd_q);
2600 return req->err;
2601 }
2602
2603 /* Do not allow empty requests */
2604 if (skb_queue_empty(&req->cmd_q))
382b0c39 2605 return -ENODATA;
2606
2607 skb = skb_peek_tail(&req->cmd_q);
2608 bt_cb(skb)->req.complete = complete;
2609
2610 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2611 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2612 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2613
2614 queue_work(hdev->workqueue, &hdev->cmd_work);
2615
2616 return 0;
2617}
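
/* Usage sketch (illustrative): commands are batched into a request and run
 * as a unit; the complete callback fires once for the whole batch.
 * my_complete is a caller-supplied hci_req_complete_t:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */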
2618
1ca3a9d0 2619static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 2620 u32 plen, const void *param)
2621{
2622 int len = HCI_COMMAND_HDR_SIZE + plen;
2623 struct hci_command_hdr *hdr;
2624 struct sk_buff *skb;
2625
1da177e4 2626 skb = bt_skb_alloc(len, GFP_ATOMIC);
2627 if (!skb)
2628 return NULL;
2629
2630 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2631 hdr->opcode = cpu_to_le16(opcode);
2632 hdr->plen = plen;
2633
2634 if (plen)
2635 memcpy(skb_put(skb, plen), param, plen);
2636
2637 BT_DBG("skb len %d", skb->len);
2638
0d48d939 2639 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2640 skb->dev = (void *) hdev;
c78ae283 2641
2642 return skb;
2643}
2644
2645/* Send HCI command */
2646int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2647 const void *param)
2648{
2649 struct sk_buff *skb;
2650
2651 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2652
2653 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2654 if (!skb) {
2655 BT_ERR("%s no memory for command", hdev->name);
2656 return -ENOMEM;
2657 }
2658
2659 /* Stand-alone HCI commands must be flagged as
2660 * single-command requests.
2661 */
2662 bt_cb(skb)->req.start = true;
2663
1da177e4 2664 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2665 queue_work(hdev->workqueue, &hdev->cmd_work);
2666
2667 return 0;
2668}
1da177e4 2669
71c76a17 2670/* Queue a command to an asynchronous HCI request */
2671void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2672 const void *param, u8 event)
2673{
2674 struct hci_dev *hdev = req->hdev;
2675 struct sk_buff *skb;
2676
2677 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2678
2679 /* If an error occurred during request building, there is no point in
2680 * queueing the HCI command. We can simply return.
2681 */
2682 if (req->err)
2683 return;
2684
2685 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2686 if (!skb) {
2687 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2688 hdev->name, opcode);
2689 req->err = -ENOMEM;
e348fe6b 2690 return;
2691 }
2692
2693 if (skb_queue_empty(&req->cmd_q))
2694 bt_cb(skb)->req.start = true;
2695
2696 bt_cb(skb)->req.event = event;
2697
71c76a17 2698 skb_queue_tail(&req->cmd_q, skb);
2699}
2700
2701void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2702 const void *param)
2703{
2704 hci_req_add_ev(req, opcode, plen, param, 0);
2705}
2706
1da177e4 2707/* Get data from the previously sent command */
a9de9248 2708void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2709{
2710 struct hci_command_hdr *hdr;
2711
2712 if (!hdev->sent_cmd)
2713 return NULL;
2714
2715 hdr = (void *) hdev->sent_cmd->data;
2716
a9de9248 2717 if (hdr->opcode != cpu_to_le16(opcode))
2718 return NULL;
2719
f0e09510 2720 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2721
2722 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2723}
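
/* Usage sketch (illustrative): event handlers use this to recover the
 * parameters of the command that a Command Complete or Command Status
 * event refers to, e.g.:
 *
 *	struct hci_cp_remote_name_req *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
 *	if (cp)
 *		bacpy(&bdaddr, &cp->bdaddr);
 */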
2724
2725/* Send ACL data */
2726static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2727{
2728 struct hci_acl_hdr *hdr;
2729 int len = skb->len;
2730
2731 skb_push(skb, HCI_ACL_HDR_SIZE);
2732 skb_reset_transport_header(skb);
9c70220b 2733 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2734 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2735 hdr->dlen = cpu_to_le16(len);
2736}
2737
ee22be7e 2738static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2739 struct sk_buff *skb, __u16 flags)
1da177e4 2740{
ee22be7e 2741 struct hci_conn *conn = chan->conn;
2742 struct hci_dev *hdev = conn->hdev;
2743 struct sk_buff *list;
2744
2745 skb->len = skb_headlen(skb);
2746 skb->data_len = 0;
2747
2748 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2749
2750 switch (hdev->dev_type) {
2751 case HCI_BREDR:
2752 hci_add_acl_hdr(skb, conn->handle, flags);
2753 break;
2754 case HCI_AMP:
2755 hci_add_acl_hdr(skb, chan->handle, flags);
2756 break;
2757 default:
2758 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2759 return;
2760 }
087bfd99 2761
2762 list = skb_shinfo(skb)->frag_list;
2763 if (!list) {
2764 /* Non-fragmented */
2765 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2766
73d80deb 2767 skb_queue_tail(queue, skb);
2768 } else {
2769 /* Fragmented */
2770 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2771
2772 skb_shinfo(skb)->frag_list = NULL;
2773
2774 /* Queue all fragments atomically */
af3e6359 2775 spin_lock(&queue->lock);
1da177e4 2776
73d80deb 2777 __skb_queue_tail(queue, skb);
2778
2779 flags &= ~ACL_START;
2780 flags |= ACL_CONT;
2781 do {
2782 skb = list; list = list->next;
8e87d142 2783
1da177e4 2784 skb->dev = (void *) hdev;
0d48d939 2785 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2786 hci_add_acl_hdr(skb, conn->handle, flags);
2787
2788 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2789
73d80deb 2790 __skb_queue_tail(queue, skb);
2791 } while (list);
2792
af3e6359 2793 spin_unlock(&queue->lock);
1da177e4 2794 }
2795}
2796
2797void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2798{
ee22be7e 2799 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 2800
f0e09510 2801 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2802
2803 skb->dev = (void *) hdev;
73d80deb 2804
ee22be7e 2805 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 2806
3eff45ea 2807 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2808}
2809
2810/* Send SCO data */
0d861d8b 2811void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2812{
2813 struct hci_dev *hdev = conn->hdev;
2814 struct hci_sco_hdr hdr;
2815
2816 BT_DBG("%s len %d", hdev->name, skb->len);
2817
aca3192c 2818 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2819 hdr.dlen = skb->len;
2820
2821 skb_push(skb, HCI_SCO_HDR_SIZE);
2822 skb_reset_transport_header(skb);
9c70220b 2823 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2824
2825 skb->dev = (void *) hdev;
0d48d939 2826 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2827
1da177e4 2828 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2829 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2830}
2831
2832/* ---- HCI TX task (outgoing data) ---- */
2833
2834/* HCI Connection scheduler */
2835static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2836 int *quote)
2837{
2838 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2839 struct hci_conn *conn = NULL, *c;
abc5de8f 2840 unsigned int num = 0, min = ~0;
1da177e4 2841
8e87d142 2842 /* We don't have to lock the device here. Connections are always
1da177e4 2843 * added and removed with the TX task disabled. */
2844
2845 rcu_read_lock();
2846
2847 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2848 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2849 continue;
2850
2851 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2852 continue;
2853
2854 num++;
2855
2856 if (c->sent < min) {
2857 min = c->sent;
2858 conn = c;
2859 }
2860
2861 if (hci_conn_num(hdev, type) == num)
2862 break;
2863 }
2864
2865 rcu_read_unlock();
2866
1da177e4 2867 if (conn) {
2868 int cnt, q;
2869
2870 switch (conn->type) {
2871 case ACL_LINK:
2872 cnt = hdev->acl_cnt;
2873 break;
2874 case SCO_LINK:
2875 case ESCO_LINK:
2876 cnt = hdev->sco_cnt;
2877 break;
2878 case LE_LINK:
2879 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2880 break;
2881 default:
2882 cnt = 0;
2883 BT_ERR("Unknown link type");
2884 }
2885
2886 q = cnt / num;
2887 *quote = q ? q : 1;
2888 } else
2889 *quote = 0;
2890
2891 BT_DBG("conn %p quote %d", conn, *quote);
2892 return conn;
2893}
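
/* Worked example (illustrative): with hdev->acl_cnt = 8 free slots shared
 * by num = 3 busy ACL connections, the least recently served connection is
 * picked and granted a quote of 8 / 3 = 2 packets; the quote never drops
 * below 1, so a chosen connection can always make progress.
 */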
2894
6039aa73 2895static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2896{
2897 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2898 struct hci_conn *c;
1da177e4 2899
bae1f5d9 2900 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2901
2902 rcu_read_lock();
2903
1da177e4 2904 /* Kill stalled connections */
bf4c6325 2905 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2906 if (c->type == type && c->sent) {
2907 BT_ERR("%s killing stalled connection %pMR",
2908 hdev->name, &c->dst);
bed71748 2909 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2910 }
2911 }
2912
2913 rcu_read_unlock();
2914}
2915
2916static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2917 int *quote)
1da177e4 2918{
2919 struct hci_conn_hash *h = &hdev->conn_hash;
2920 struct hci_chan *chan = NULL;
abc5de8f 2921 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2922 struct hci_conn *conn;
2923 int cnt, q, conn_num = 0;
2924
2925 BT_DBG("%s", hdev->name);
2926
2927 rcu_read_lock();
2928
2929 list_for_each_entry_rcu(conn, &h->list, list) {
2930 struct hci_chan *tmp;
2931
2932 if (conn->type != type)
2933 continue;
2934
2935 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2936 continue;
2937
2938 conn_num++;
2939
8192edef 2940 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2941 struct sk_buff *skb;
2942
2943 if (skb_queue_empty(&tmp->data_q))
2944 continue;
2945
2946 skb = skb_peek(&tmp->data_q);
2947 if (skb->priority < cur_prio)
2948 continue;
2949
2950 if (skb->priority > cur_prio) {
2951 num = 0;
2952 min = ~0;
2953 cur_prio = skb->priority;
2954 }
2955
2956 num++;
2957
2958 if (conn->sent < min) {
2959 min = conn->sent;
2960 chan = tmp;
2961 }
2962 }
2963
2964 if (hci_conn_num(hdev, type) == conn_num)
2965 break;
2966 }
2967
2968 rcu_read_unlock();
2969
2970 if (!chan)
2971 return NULL;
2972
2973 switch (chan->conn->type) {
2974 case ACL_LINK:
2975 cnt = hdev->acl_cnt;
2976 break;
2977 case AMP_LINK:
2978 cnt = hdev->block_cnt;
2979 break;
2980 case SCO_LINK:
2981 case ESCO_LINK:
2982 cnt = hdev->sco_cnt;
2983 break;
2984 case LE_LINK:
2985 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2986 break;
2987 default:
2988 cnt = 0;
2989 BT_ERR("Unknown link type");
2990 }
2991
2992 q = cnt / num;
2993 *quote = q ? q : 1;
2994 BT_DBG("chan %p quote %d", chan, *quote);
2995 return chan;
2996}
2997
2998static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2999{
3000 struct hci_conn_hash *h = &hdev->conn_hash;
3001 struct hci_conn *conn;
3002 int num = 0;
3003
3004 BT_DBG("%s", hdev->name);
3005
3006 rcu_read_lock();
3007
3008 list_for_each_entry_rcu(conn, &h->list, list) {
3009 struct hci_chan *chan;
3010
3011 if (conn->type != type)
3012 continue;
3013
3014 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3015 continue;
3016
3017 num++;
3018
8192edef 3019 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3020 struct sk_buff *skb;
3021
3022 if (chan->sent) {
3023 chan->sent = 0;
3024 continue;
3025 }
3026
3027 if (skb_queue_empty(&chan->data_q))
3028 continue;
3029
3030 skb = skb_peek(&chan->data_q);
3031 if (skb->priority >= HCI_PRIO_MAX - 1)
3032 continue;
3033
3034 skb->priority = HCI_PRIO_MAX - 1;
3035
3036 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3037 skb->priority);
3038 }
3039
3040 if (hci_conn_num(hdev, type) == num)
3041 break;
3042 }
3043
3044 rcu_read_unlock();
3045
3046}
3047
3048static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3049{
3050 /* Calculate count of blocks used by this packet */
3051 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3052}
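
/* Worked example (illustrative): with hdev->block_len = 256, an ACL packet
 * carrying 1000 bytes of payload after the ACL header costs
 * DIV_ROUND_UP(1000, 256) = 4 controller data blocks.
 */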
3053
6039aa73 3054static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3055{
3056 if (!test_bit(HCI_RAW, &hdev->flags)) {
3057 /* ACL tx timeout must be longer than the maximum
3058 * link supervision timeout (40.9 seconds) */
63d2bc1b 3059 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3060 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3061 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3062 }
63d2bc1b 3063}
1da177e4 3064
6039aa73 3065static void hci_sched_acl_pkt(struct hci_dev *hdev)
3066{
3067 unsigned int cnt = hdev->acl_cnt;
3068 struct hci_chan *chan;
3069 struct sk_buff *skb;
3070 int quote;
3071
3072 __check_timeout(hdev, cnt);
04837f64 3073
73d80deb 3074 while (hdev->acl_cnt &&
a8c5fb1a 3075 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3076 u32 priority = (skb_peek(&chan->data_q))->priority;
3077 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3078 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3079 skb->len, skb->priority);
73d80deb 3080
3081 /* Stop if priority has changed */
3082 if (skb->priority < priority)
3083 break;
3084
3085 skb = skb_dequeue(&chan->data_q);
3086
73d80deb 3087 hci_conn_enter_active_mode(chan->conn,
04124681 3088 bt_cb(skb)->force_active);
04837f64 3089
3090 hci_send_frame(skb);
3091 hdev->acl_last_tx = jiffies;
3092
3093 hdev->acl_cnt--;
3094 chan->sent++;
3095 chan->conn->sent++;
3096 }
3097 }
3098
3099 if (cnt != hdev->acl_cnt)
3100 hci_prio_recalculate(hdev, ACL_LINK);
3101}
3102
6039aa73 3103static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3104{
63d2bc1b 3105 unsigned int cnt = hdev->block_cnt;
3106 struct hci_chan *chan;
3107 struct sk_buff *skb;
3108 int quote;
bd1eb66b 3109 u8 type;
b71d385a 3110
63d2bc1b 3111 __check_timeout(hdev, cnt);
b71d385a 3112
3113 BT_DBG("%s", hdev->name);
3114
3115 if (hdev->dev_type == HCI_AMP)
3116 type = AMP_LINK;
3117 else
3118 type = ACL_LINK;
3119
b71d385a 3120 while (hdev->block_cnt > 0 &&
bd1eb66b 3121 (chan = hci_chan_sent(hdev, type, &quote))) {
3122 u32 priority = (skb_peek(&chan->data_q))->priority;
3123 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3124 int blocks;
3125
3126 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3127 skb->len, skb->priority);
3128
3129 /* Stop if priority has changed */
3130 if (skb->priority < priority)
3131 break;
3132
3133 skb = skb_dequeue(&chan->data_q);
3134
3135 blocks = __get_blocks(hdev, skb);
3136 if (blocks > hdev->block_cnt)
3137 return;
3138
3139 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3140 bt_cb(skb)->force_active);
b71d385a
AE
3141
3142 hci_send_frame(skb);
3143 hdev->acl_last_tx = jiffies;
3144
3145 hdev->block_cnt -= blocks;
3146 quote -= blocks;
3147
3148 chan->sent += blocks;
3149 chan->conn->sent += blocks;
3150 }
3151 }
3152
3153 if (cnt != hdev->block_cnt)
bd1eb66b 3154 hci_prio_recalculate(hdev, type);
3155}
3156
6039aa73 3157static void hci_sched_acl(struct hci_dev *hdev)
3158{
3159 BT_DBG("%s", hdev->name);
3160
3161 /* No ACL link over BR/EDR controller */
3162 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3163 return;
3164
3165 /* No AMP link over AMP controller */
3166 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3167 return;
3168
3169 switch (hdev->flow_ctl_mode) {
3170 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3171 hci_sched_acl_pkt(hdev);
3172 break;
3173
3174 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3175 hci_sched_acl_blk(hdev);
3176 break;
3177 }
3178}
3179
1da177e4 3180/* Schedule SCO */
6039aa73 3181static void hci_sched_sco(struct hci_dev *hdev)
3182{
3183 struct hci_conn *conn;
3184 struct sk_buff *skb;
3185 int quote;
3186
3187 BT_DBG("%s", hdev->name);
3188
3189 if (!hci_conn_num(hdev, SCO_LINK))
3190 return;
3191
3192 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3193 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3194 BT_DBG("skb %p len %d", skb, skb->len);
3195 hci_send_frame(skb);
3196
3197 conn->sent++;
3198 if (conn->sent == ~0)
3199 conn->sent = 0;
3200 }
3201 }
3202}
3203
6039aa73 3204static void hci_sched_esco(struct hci_dev *hdev)
3205{
3206 struct hci_conn *conn;
3207 struct sk_buff *skb;
3208 int quote;
3209
3210 BT_DBG("%s", hdev->name);
3211
3212 if (!hci_conn_num(hdev, ESCO_LINK))
3213 return;
3214
3215 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3216 &quote))) {
3217 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3218 BT_DBG("skb %p len %d", skb, skb->len);
3219 hci_send_frame(skb);
3220
3221 conn->sent++;
3222 if (conn->sent == ~0)
3223 conn->sent = 0;
3224 }
3225 }
3226}
3227
6039aa73 3228static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3229{
73d80deb 3230 struct hci_chan *chan;
6ed58ec5 3231 struct sk_buff *skb;
02b20f0b 3232 int quote, cnt, tmp;
3233
3234 BT_DBG("%s", hdev->name);
3235
3236 if (!hci_conn_num(hdev, LE_LINK))
3237 return;
3238
3239 if (!test_bit(HCI_RAW, &hdev->flags)) {
3240 /* LE tx timeout must be longer than the maximum
3241 * link supervision timeout (40.9 seconds) */
bae1f5d9 3242 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3243 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3244 hci_link_tx_to(hdev, LE_LINK);
3245 }
3246
3247 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3248 tmp = cnt;
73d80deb 3249 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3250 u32 priority = (skb_peek(&chan->data_q))->priority;
3251 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3252 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3253 skb->len, skb->priority);
6ed58ec5 3254
3255 /* Stop if priority has changed */
3256 if (skb->priority < priority)
3257 break;
3258
3259 skb = skb_dequeue(&chan->data_q);
3260
3261 hci_send_frame(skb);
3262 hdev->le_last_tx = jiffies;
3263
3264 cnt--;
3265 chan->sent++;
3266 chan->conn->sent++;
3267 }
3268 }
73d80deb 3269
3270 if (hdev->le_pkts)
3271 hdev->le_cnt = cnt;
3272 else
3273 hdev->acl_cnt = cnt;
3274
3275 if (cnt != tmp)
3276 hci_prio_recalculate(hdev, LE_LINK);
3277}
3278
3eff45ea 3279static void hci_tx_work(struct work_struct *work)
1da177e4 3280{
3eff45ea 3281 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3282 struct sk_buff *skb;
3283
6ed58ec5 3284 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3285 hdev->sco_cnt, hdev->le_cnt);
3286
3287 /* Schedule queues and send stuff to HCI driver */
3288
3289 hci_sched_acl(hdev);
3290
3291 hci_sched_sco(hdev);
3292
3293 hci_sched_esco(hdev);
3294
3295 hci_sched_le(hdev);
3296
3297 /* Send next queued raw (unknown type) packet */
3298 while ((skb = skb_dequeue(&hdev->raw_q)))
3299 hci_send_frame(skb);
3300}
3301
25985edc 3302/* ----- HCI RX task (incoming data processing) ----- */
3303
3304/* ACL data packet */
6039aa73 3305static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3306{
3307 struct hci_acl_hdr *hdr = (void *) skb->data;
3308 struct hci_conn *conn;
3309 __u16 handle, flags;
3310
3311 skb_pull(skb, HCI_ACL_HDR_SIZE);
3312
3313 handle = __le16_to_cpu(hdr->handle);
3314 flags = hci_flags(handle);
3315 handle = hci_handle(handle);
3316
f0e09510 3317 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3318 handle, flags);
3319
3320 hdev->stat.acl_rx++;
3321
3322 hci_dev_lock(hdev);
3323 conn = hci_conn_hash_lookup_handle(hdev, handle);
3324 hci_dev_unlock(hdev);
8e87d142 3325
1da177e4 3326 if (conn) {
65983fc7 3327 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3328
1da177e4 3329 /* Send to upper protocol */
3330 l2cap_recv_acldata(conn, skb, flags);
3331 return;
1da177e4 3332 } else {
8e87d142 3333 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3334 hdev->name, handle);
3335 }
3336
3337 kfree_skb(skb);
3338}
3339
3340/* SCO data packet */
6039aa73 3341static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3342{
3343 struct hci_sco_hdr *hdr = (void *) skb->data;
3344 struct hci_conn *conn;
3345 __u16 handle;
3346
3347 skb_pull(skb, HCI_SCO_HDR_SIZE);
3348
3349 handle = __le16_to_cpu(hdr->handle);
3350
f0e09510 3351 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3352
3353 hdev->stat.sco_rx++;
3354
3355 hci_dev_lock(hdev);
3356 conn = hci_conn_hash_lookup_handle(hdev, handle);
3357 hci_dev_unlock(hdev);
3358
3359 if (conn) {
1da177e4 3360 /* Send to upper protocol */
3361 sco_recv_scodata(conn, skb);
3362 return;
1da177e4 3363 } else {
8e87d142 3364 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3365 hdev->name, handle);
3366 }
3367
3368 kfree_skb(skb);
3369}
3370
3371static bool hci_req_is_complete(struct hci_dev *hdev)
3372{
3373 struct sk_buff *skb;
3374
3375 skb = skb_peek(&hdev->cmd_q);
3376 if (!skb)
3377 return true;
3378
3379 return bt_cb(skb)->req.start;
3380}
3381
3382static void hci_resend_last(struct hci_dev *hdev)
3383{
3384 struct hci_command_hdr *sent;
3385 struct sk_buff *skb;
3386 u16 opcode;
3387
3388 if (!hdev->sent_cmd)
3389 return;
3390
3391 sent = (void *) hdev->sent_cmd->data;
3392 opcode = __le16_to_cpu(sent->opcode);
3393 if (opcode == HCI_OP_RESET)
3394 return;
3395
3396 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3397 if (!skb)
3398 return;
3399
3400 skb_queue_head(&hdev->cmd_q, skb);
3401 queue_work(hdev->workqueue, &hdev->cmd_work);
3402}
3403
3404void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3405{
3406 hci_req_complete_t req_complete = NULL;
3407 struct sk_buff *skb;
3408 unsigned long flags;
3409
3410 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3411
3412 /* If the completed command doesn't match the last one that was
3413 * sent we need to do special handling of it.
9238f36a 3414 */
3415 if (!hci_sent_cmd_data(hdev, opcode)) {
3416 /* Some CSR based controllers generate a spontaneous
3417 * reset complete event during init and any pending
3418 * command will never be completed. In such a case we
3419 * need to resend whatever was the last sent
3420 * command.
3421 */
3422 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3423 hci_resend_last(hdev);
3424
9238f36a 3425 return;
42c6b129 3426 }
3427
3428 /* If the command succeeded and there's still more commands in
3429 * this request the request is not yet complete.
3430 */
3431 if (!status && !hci_req_is_complete(hdev))
3432 return;
3433
3434 /* If this was the last command in a request the complete
3435 * callback would be found in hdev->sent_cmd instead of the
3436 * command queue (hdev->cmd_q).
3437 */
3438 if (hdev->sent_cmd) {
3439 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3440 if (req_complete)
3441 goto call_complete;
3442 }
3443
3444 /* Remove all pending commands belonging to this request */
3445 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3446 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3447 if (bt_cb(skb)->req.start) {
3448 __skb_queue_head(&hdev->cmd_q, skb);
3449 break;
3450 }
3451
3452 req_complete = bt_cb(skb)->req.complete;
3453 kfree_skb(skb);
3454 }
3455 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3456
3457call_complete:
3458 if (req_complete)
3459 req_complete(hdev, status);
3460}
3461
b78752cc 3462static void hci_rx_work(struct work_struct *work)
1da177e4 3463{
b78752cc 3464 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3465 struct sk_buff *skb;
3466
3467 BT_DBG("%s", hdev->name);
3468
1da177e4 3469 while ((skb = skb_dequeue(&hdev->rx_q))) {
3470 /* Send copy to monitor */
3471 hci_send_to_monitor(hdev, skb);
3472
3473 if (atomic_read(&hdev->promisc)) {
3474 /* Send copy to the sockets */
470fe1b5 3475 hci_send_to_sock(hdev, skb);
3476 }
3477
3478 if (test_bit(HCI_RAW, &hdev->flags)) {
3479 kfree_skb(skb);
3480 continue;
3481 }
3482
3483 if (test_bit(HCI_INIT, &hdev->flags)) {
3484 /* Don't process data packets in these states. */
0d48d939 3485 switch (bt_cb(skb)->pkt_type) {
3486 case HCI_ACLDATA_PKT:
3487 case HCI_SCODATA_PKT:
3488 kfree_skb(skb);
3489 continue;
3ff50b79 3490 }
3491 }
3492
3493 /* Process frame */
0d48d939 3494 switch (bt_cb(skb)->pkt_type) {
1da177e4 3495 case HCI_EVENT_PKT:
b78752cc 3496 BT_DBG("%s Event packet", hdev->name);
3497 hci_event_packet(hdev, skb);
3498 break;
3499
3500 case HCI_ACLDATA_PKT:
3501 BT_DBG("%s ACL data packet", hdev->name);
3502 hci_acldata_packet(hdev, skb);
3503 break;
3504
3505 case HCI_SCODATA_PKT:
3506 BT_DBG("%s SCO data packet", hdev->name);
3507 hci_scodata_packet(hdev, skb);
3508 break;
3509
3510 default:
3511 kfree_skb(skb);
3512 break;
3513 }
3514 }
3515}
3516
c347b765 3517static void hci_cmd_work(struct work_struct *work)
1da177e4 3518{
c347b765 3519 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3520 struct sk_buff *skb;
3521
3522 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3523 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3524
1da177e4 3525 /* Send queued commands */
3526 if (atomic_read(&hdev->cmd_cnt)) {
3527 skb = skb_dequeue(&hdev->cmd_q);
3528 if (!skb)
3529 return;
3530
7585b97a 3531 kfree_skb(hdev->sent_cmd);
1da177e4 3532
3533 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3534 if (hdev->sent_cmd) {
3535 atomic_dec(&hdev->cmd_cnt);
3536 hci_send_frame(skb);
3537 if (test_bit(HCI_RESET, &hdev->flags))
3538 del_timer(&hdev->cmd_timer);
3539 else
3540 mod_timer(&hdev->cmd_timer,
5f246e89 3541 jiffies + HCI_CMD_TIMEOUT);
3542 } else {
3543 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3544 queue_work(hdev->workqueue, &hdev->cmd_work);
3545 }
3546 }
3547}
3548
3549int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3550{
3551 /* General inquiry access code (GIAC) */
3552 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3553 struct hci_cp_inquiry cp;
3554
3555 BT_DBG("%s", hdev->name);
3556
3557 if (test_bit(HCI_INQUIRY, &hdev->flags))
3558 return -EINPROGRESS;
3559
3560 inquiry_cache_flush(hdev);
3561
3562 memset(&cp, 0, sizeof(cp));
3563 memcpy(&cp.lap, lap, sizeof(cp.lap));
3564 cp.length = length;
3565
3566 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3567}
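
/* Usage sketch (illustrative): discovery typically requests a standard
 * 10.24 s inquiry; length is in units of 1.28 s:
 *
 *	err = hci_do_inquiry(hdev, 0x08);
 */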
3568
3569int hci_cancel_inquiry(struct hci_dev *hdev)
3570{
3571 BT_DBG("%s", hdev->name);
3572
3573 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3574 return -EALREADY;
3575
3576 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3577}
3578
3579u8 bdaddr_to_le(u8 bdaddr_type)
3580{
3581 switch (bdaddr_type) {
3582 case BDADDR_LE_PUBLIC:
3583 return ADDR_LE_DEV_PUBLIC;
3584
3585 default:
3586 /* Fallback to LE Random address type */
3587 return ADDR_LE_DEV_RANDOM;
3588 }
3589}