Bluetooth: Add __hci_cmd_sync_ev function
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device state event (up/down/register/...) to the HCI
 * socket layer so monitoring sockets are informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
7b1abbbe 82struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
75e84b7c
JH
83{
84 struct hci_ev_cmd_complete *ev;
85 struct hci_event_hdr *hdr;
86 struct sk_buff *skb;
87
88 hci_dev_lock(hdev);
89
90 skb = hdev->recv_evt;
91 hdev->recv_evt = NULL;
92
93 hci_dev_unlock(hdev);
94
95 if (!skb)
96 return ERR_PTR(-ENODATA);
97
98 if (skb->len < sizeof(*hdr)) {
99 BT_ERR("Too short HCI event");
100 goto failed;
101 }
102
103 hdr = (void *) skb->data;
104 skb_pull(skb, HCI_EVENT_HDR_SIZE);
105
7b1abbbe
JH
106 if (event) {
107 if (hdr->evt != event)
108 goto failed;
109 return skb;
110 }
111
75e84b7c
JH
112 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
113 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
114 goto failed;
115 }
116
117 if (skb->len < sizeof(*ev)) {
118 BT_ERR("Too short cmd_complete event");
119 goto failed;
120 }
121
122 ev = (void *) skb->data;
123 skb_pull(skb, sizeof(*ev));
124
125 if (opcode == __le16_to_cpu(ev->opcode))
126 return skb;
127
128 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
129 __le16_to_cpu(ev->opcode));
130
131failed:
132 kfree_skb(skb);
133 return ERR_PTR(-ENODATA);
134}
135
7b1abbbe
JH
136struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
137 void *param, u8 event, u32 timeout)
75e84b7c
JH
138{
139 DECLARE_WAITQUEUE(wait, current);
140 struct hci_request req;
141 int err = 0;
142
143 BT_DBG("%s", hdev->name);
144
145 hci_req_init(&req, hdev);
146
7b1abbbe 147 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
148
149 hdev->req_status = HCI_REQ_PEND;
150
151 err = hci_req_run(&req, hci_req_sync_complete);
152 if (err < 0)
153 return ERR_PTR(err);
154
155 add_wait_queue(&hdev->req_wait_q, &wait);
156 set_current_state(TASK_INTERRUPTIBLE);
157
158 schedule_timeout(timeout);
159
160 remove_wait_queue(&hdev->req_wait_q, &wait);
161
162 if (signal_pending(current))
163 return ERR_PTR(-EINTR);
164
165 switch (hdev->req_status) {
166 case HCI_REQ_DONE:
167 err = -bt_to_errno(hdev->req_result);
168 break;
169
170 case HCI_REQ_CANCELED:
171 err = -hdev->req_result;
172 break;
173
174 default:
175 err = -ETIMEDOUT;
176 break;
177 }
178
179 hdev->req_status = hdev->req_result = 0;
180
181 BT_DBG("%s end: err %d", hdev->name, err);
182
183 if (err < 0)
184 return ERR_PTR(err);
185
7b1abbbe
JH
186 return hci_get_cmd_complete(hdev, opcode, event);
187}
188EXPORT_SYMBOL(__hci_cmd_sync_ev);
189
190struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
191 void *param, u32 timeout)
192{
193 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
194}
195EXPORT_SYMBOL(__hci_cmd_sync);
196
1da177e4 197/* Execute request and wait for completion. */
01178cd4 198static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
199 void (*func)(struct hci_request *req,
200 unsigned long opt),
01178cd4 201 unsigned long opt, __u32 timeout)
1da177e4 202{
42c6b129 203 struct hci_request req;
1da177e4
LT
204 DECLARE_WAITQUEUE(wait, current);
205 int err = 0;
206
207 BT_DBG("%s start", hdev->name);
208
42c6b129
JH
209 hci_req_init(&req, hdev);
210
1da177e4
LT
211 hdev->req_status = HCI_REQ_PEND;
212
42c6b129 213 func(&req, opt);
53cce22d 214
42c6b129
JH
215 err = hci_req_run(&req, hci_req_sync_complete);
216 if (err < 0) {
53cce22d 217 hdev->req_status = 0;
920c8300
AG
218
219 /* ENODATA means the HCI request command queue is empty.
220 * This can happen when a request with conditionals doesn't
221 * trigger any commands to be sent. This is normal behavior
222 * and should not trigger an error return.
42c6b129 223 */
920c8300
AG
224 if (err == -ENODATA)
225 return 0;
226
227 return err;
53cce22d
JH
228 }
229
bc4445c7
AG
230 add_wait_queue(&hdev->req_wait_q, &wait);
231 set_current_state(TASK_INTERRUPTIBLE);
232
1da177e4
LT
233 schedule_timeout(timeout);
234
235 remove_wait_queue(&hdev->req_wait_q, &wait);
236
237 if (signal_pending(current))
238 return -EINTR;
239
240 switch (hdev->req_status) {
241 case HCI_REQ_DONE:
e175072f 242 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
243 break;
244
245 case HCI_REQ_CANCELED:
246 err = -hdev->req_result;
247 break;
248
249 default:
250 err = -ETIMEDOUT;
251 break;
3ff50b79 252 }
1da177e4 253
a5040efa 254 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
255
256 BT_DBG("%s end: err %d", hdev->name, err);
257
258 return err;
259}
260
01178cd4 261static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
262 void (*req)(struct hci_request *req,
263 unsigned long opt),
01178cd4 264 unsigned long opt, __u32 timeout)
1da177e4
LT
265{
266 int ret;
267
7c6a329e
MH
268 if (!test_bit(HCI_UP, &hdev->flags))
269 return -ENETDOWN;
270
1da177e4
LT
271 /* Serialize all requests */
272 hci_req_lock(hdev);
01178cd4 273 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
274 hci_req_unlock(hdev);
275
276 return ret;
277}
278
42c6b129 279static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 280{
42c6b129 281 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
282
283 /* Reset device */
42c6b129
JH
284 set_bit(HCI_RESET, &req->hdev->flags);
285 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
286}
287
42c6b129 288static void bredr_init(struct hci_request *req)
1da177e4 289{
42c6b129 290 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 291
1da177e4 292 /* Read Local Supported Features */
42c6b129 293 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 294
1143e5a6 295 /* Read Local Version */
42c6b129 296 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
297
298 /* Read BD Address */
42c6b129 299 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
300}
301
42c6b129 302static void amp_init(struct hci_request *req)
e61ef499 303{
42c6b129 304 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 305
e61ef499 306 /* Read Local Version */
42c6b129 307 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
308
309 /* Read Local AMP Info */
42c6b129 310 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
311
312 /* Read Data Blk size */
42c6b129 313 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
314}
315
42c6b129 316static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 317{
42c6b129
JH
318 struct hci_dev *hdev = req->hdev;
319 struct hci_request init_req;
e61ef499
AE
320 struct sk_buff *skb;
321
322 BT_DBG("%s %ld", hdev->name, opt);
323
324 /* Driver initialization */
325
42c6b129
JH
326 hci_req_init(&init_req, hdev);
327
e61ef499
AE
328 /* Special commands */
329 while ((skb = skb_dequeue(&hdev->driver_init))) {
330 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
331 skb->dev = (void *) hdev;
332
42c6b129
JH
333 if (skb_queue_empty(&init_req.cmd_q))
334 bt_cb(skb)->req.start = true;
335
336 skb_queue_tail(&init_req.cmd_q, skb);
e61ef499
AE
337 }
338 skb_queue_purge(&hdev->driver_init);
339
42c6b129
JH
340 hci_req_run(&init_req, NULL);
341
11778716
AE
342 /* Reset */
343 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 344 hci_reset_req(req, 0);
11778716 345
e61ef499
AE
346 switch (hdev->dev_type) {
347 case HCI_BREDR:
42c6b129 348 bredr_init(req);
e61ef499
AE
349 break;
350
351 case HCI_AMP:
42c6b129 352 amp_init(req);
e61ef499
AE
353 break;
354
355 default:
356 BT_ERR("Unknown device type %d", hdev->dev_type);
357 break;
358 }
e61ef499
AE
359}
360
42c6b129 361static void bredr_setup(struct hci_request *req)
2177bab5
JH
362{
363 struct hci_cp_delete_stored_link_key cp;
364 __le16 param;
365 __u8 flt_type;
366
367 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 368 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
369
370 /* Read Class of Device */
42c6b129 371 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
372
373 /* Read Local Name */
42c6b129 374 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
375
376 /* Read Voice Setting */
42c6b129 377 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5
JH
378
379 /* Clear Event Filters */
380 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 381 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
382
383 /* Connection accept timeout ~20 secs */
384 param = __constant_cpu_to_le16(0x7d00);
42c6b129 385 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5
JH
386
387 bacpy(&cp.bdaddr, BDADDR_ANY);
388 cp.delete_all = 0x01;
42c6b129 389 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
f332ec66
JH
390
391 /* Read page scan parameters */
392 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
393 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
394 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
395 }
2177bab5
JH
396}
397
42c6b129 398static void le_setup(struct hci_request *req)
2177bab5
JH
399{
400 /* Read LE Buffer Size */
42c6b129 401 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
402
403 /* Read LE Local Supported Features */
42c6b129 404 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
405
406 /* Read LE Advertising Channel TX Power */
42c6b129 407 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
408
409 /* Read LE White List Size */
42c6b129 410 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
411
412 /* Read LE Supported States */
42c6b129 413 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
2177bab5
JH
414}
415
416static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
417{
418 if (lmp_ext_inq_capable(hdev))
419 return 0x02;
420
421 if (lmp_inq_rssi_capable(hdev))
422 return 0x01;
423
424 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
425 hdev->lmp_subver == 0x0757)
426 return 0x01;
427
428 if (hdev->manufacturer == 15) {
429 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
430 return 0x01;
431 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
432 return 0x01;
433 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
434 return 0x01;
435 }
436
437 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
438 hdev->lmp_subver == 0x1805)
439 return 0x01;
440
441 return 0x00;
442}
443
42c6b129 444static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
445{
446 u8 mode;
447
42c6b129 448 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 449
42c6b129 450 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
451}
452
42c6b129 453static void hci_setup_event_mask(struct hci_request *req)
2177bab5 454{
42c6b129
JH
455 struct hci_dev *hdev = req->hdev;
456
2177bab5
JH
457 /* The second byte is 0xff instead of 0x9f (two reserved bits
458 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
459 * command otherwise.
460 */
461 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
462
463 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
464 * any event mask for pre 1.2 devices.
465 */
466 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
467 return;
468
469 if (lmp_bredr_capable(hdev)) {
470 events[4] |= 0x01; /* Flow Specification Complete */
471 events[4] |= 0x02; /* Inquiry Result with RSSI */
472 events[4] |= 0x04; /* Read Remote Extended Features Complete */
473 events[5] |= 0x08; /* Synchronous Connection Complete */
474 events[5] |= 0x10; /* Synchronous Connection Changed */
475 }
476
477 if (lmp_inq_rssi_capable(hdev))
478 events[4] |= 0x02; /* Inquiry Result with RSSI */
479
480 if (lmp_sniffsubr_capable(hdev))
481 events[5] |= 0x20; /* Sniff Subrating */
482
483 if (lmp_pause_enc_capable(hdev))
484 events[5] |= 0x80; /* Encryption Key Refresh Complete */
485
486 if (lmp_ext_inq_capable(hdev))
487 events[5] |= 0x40; /* Extended Inquiry Result */
488
489 if (lmp_no_flush_capable(hdev))
490 events[7] |= 0x01; /* Enhanced Flush Complete */
491
492 if (lmp_lsto_capable(hdev))
493 events[6] |= 0x80; /* Link Supervision Timeout Changed */
494
495 if (lmp_ssp_capable(hdev)) {
496 events[6] |= 0x01; /* IO Capability Request */
497 events[6] |= 0x02; /* IO Capability Response */
498 events[6] |= 0x04; /* User Confirmation Request */
499 events[6] |= 0x08; /* User Passkey Request */
500 events[6] |= 0x10; /* Remote OOB Data Request */
501 events[6] |= 0x20; /* Simple Pairing Complete */
502 events[7] |= 0x04; /* User Passkey Notification */
503 events[7] |= 0x08; /* Keypress Notification */
504 events[7] |= 0x10; /* Remote Host Supported
505 * Features Notification
506 */
507 }
508
509 if (lmp_le_capable(hdev))
510 events[7] |= 0x20; /* LE Meta-Event */
511
42c6b129 512 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
513
514 if (lmp_le_capable(hdev)) {
515 memset(events, 0, sizeof(events));
516 events[0] = 0x1f;
42c6b129
JH
517 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
518 sizeof(events), events);
2177bab5
JH
519 }
520}
521
42c6b129 522static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 523{
42c6b129
JH
524 struct hci_dev *hdev = req->hdev;
525
2177bab5 526 if (lmp_bredr_capable(hdev))
42c6b129 527 bredr_setup(req);
2177bab5
JH
528
529 if (lmp_le_capable(hdev))
42c6b129 530 le_setup(req);
2177bab5 531
42c6b129 532 hci_setup_event_mask(req);
2177bab5
JH
533
534 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 535 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
536
537 if (lmp_ssp_capable(hdev)) {
538 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
539 u8 mode = 0x01;
42c6b129
JH
540 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
541 sizeof(mode), &mode);
2177bab5
JH
542 } else {
543 struct hci_cp_write_eir cp;
544
545 memset(hdev->eir, 0, sizeof(hdev->eir));
546 memset(&cp, 0, sizeof(cp));
547
42c6b129 548 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
549 }
550 }
551
552 if (lmp_inq_rssi_capable(hdev))
42c6b129 553 hci_setup_inquiry_mode(req);
2177bab5
JH
554
555 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 556 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
557
558 if (lmp_ext_feat_capable(hdev)) {
559 struct hci_cp_read_local_ext_features cp;
560
561 cp.page = 0x01;
42c6b129
JH
562 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
563 sizeof(cp), &cp);
2177bab5
JH
564 }
565
566 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
567 u8 enable = 1;
42c6b129
JH
568 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
569 &enable);
2177bab5
JH
570 }
571}
572
42c6b129 573static void hci_setup_link_policy(struct hci_request *req)
2177bab5 574{
42c6b129 575 struct hci_dev *hdev = req->hdev;
2177bab5
JH
576 struct hci_cp_write_def_link_policy cp;
577 u16 link_policy = 0;
578
579 if (lmp_rswitch_capable(hdev))
580 link_policy |= HCI_LP_RSWITCH;
581 if (lmp_hold_capable(hdev))
582 link_policy |= HCI_LP_HOLD;
583 if (lmp_sniff_capable(hdev))
584 link_policy |= HCI_LP_SNIFF;
585 if (lmp_park_capable(hdev))
586 link_policy |= HCI_LP_PARK;
587
588 cp.policy = cpu_to_le16(link_policy);
42c6b129 589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
590}
591
42c6b129 592static void hci_set_le_support(struct hci_request *req)
2177bab5 593{
42c6b129 594 struct hci_dev *hdev = req->hdev;
2177bab5
JH
595 struct hci_cp_write_le_host_supported cp;
596
597 memset(&cp, 0, sizeof(cp));
598
599 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
600 cp.le = 0x01;
601 cp.simul = lmp_le_br_capable(hdev);
602 }
603
604 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
605 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
606 &cp);
2177bab5
JH
607}
608
42c6b129 609static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 610{
42c6b129
JH
611 struct hci_dev *hdev = req->hdev;
612
2177bab5 613 if (hdev->commands[5] & 0x10)
42c6b129 614 hci_setup_link_policy(req);
2177bab5 615
04b4edcb 616 if (lmp_le_capable(hdev)) {
42c6b129 617 hci_set_le_support(req);
04b4edcb
JH
618 hci_update_ad(req);
619 }
2177bab5
JH
620}
621
622static int __hci_init(struct hci_dev *hdev)
623{
624 int err;
625
626 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
627 if (err < 0)
628 return err;
629
630 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
631 * BR/EDR/LE type controllers. AMP controllers only need the
632 * first stage init.
633 */
634 if (hdev->dev_type != HCI_BREDR)
635 return 0;
636
637 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
638 if (err < 0)
639 return err;
640
641 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
642}
643
42c6b129 644static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
645{
646 __u8 scan = opt;
647
42c6b129 648 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
649
650 /* Inquiry and Page scans */
42c6b129 651 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
652}
653
42c6b129 654static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
655{
656 __u8 auth = opt;
657
42c6b129 658 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
659
660 /* Authentication */
42c6b129 661 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
662}
663
42c6b129 664static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
665{
666 __u8 encrypt = opt;
667
42c6b129 668 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 669
e4e8e37c 670 /* Encryption */
42c6b129 671 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
672}
673
42c6b129 674static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
675{
676 __le16 policy = cpu_to_le16(opt);
677
42c6b129 678 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
679
680 /* Default link policy */
42c6b129 681 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
682}
683
8e87d142 684/* Get HCI device by index.
1da177e4
LT
685 * Device is held on return. */
686struct hci_dev *hci_dev_get(int index)
687{
8035ded4 688 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
689
690 BT_DBG("%d", index);
691
692 if (index < 0)
693 return NULL;
694
695 read_lock(&hci_dev_list_lock);
8035ded4 696 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
697 if (d->id == index) {
698 hdev = hci_dev_hold(d);
699 break;
700 }
701 }
702 read_unlock(&hci_dev_list_lock);
703 return hdev;
704}
1da177e4
LT
705
706/* ---- Inquiry support ---- */
ff9ef578 707
30dc78e1
JH
708bool hci_discovery_active(struct hci_dev *hdev)
709{
710 struct discovery_state *discov = &hdev->discovery;
711
6fbe195d 712 switch (discov->state) {
343f935b 713 case DISCOVERY_FINDING:
6fbe195d 714 case DISCOVERY_RESOLVING:
30dc78e1
JH
715 return true;
716
6fbe195d
AG
717 default:
718 return false;
719 }
30dc78e1
JH
720}
721
ff9ef578
JH
722void hci_discovery_set_state(struct hci_dev *hdev, int state)
723{
724 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
725
726 if (hdev->discovery.state == state)
727 return;
728
729 switch (state) {
730 case DISCOVERY_STOPPED:
7b99b659
AG
731 if (hdev->discovery.state != DISCOVERY_STARTING)
732 mgmt_discovering(hdev, 0);
ff9ef578
JH
733 break;
734 case DISCOVERY_STARTING:
735 break;
343f935b 736 case DISCOVERY_FINDING:
ff9ef578
JH
737 mgmt_discovering(hdev, 1);
738 break;
30dc78e1
JH
739 case DISCOVERY_RESOLVING:
740 break;
ff9ef578
JH
741 case DISCOVERY_STOPPING:
742 break;
743 }
744
745 hdev->discovery.state = state;
746}
747
1da177e4
LT
748static void inquiry_cache_flush(struct hci_dev *hdev)
749{
30883512 750 struct discovery_state *cache = &hdev->discovery;
b57c1a56 751 struct inquiry_entry *p, *n;
1da177e4 752
561aafbc
JH
753 list_for_each_entry_safe(p, n, &cache->all, all) {
754 list_del(&p->all);
b57c1a56 755 kfree(p);
1da177e4 756 }
561aafbc
JH
757
758 INIT_LIST_HEAD(&cache->unknown);
759 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
760}
761
a8c5fb1a
GP
762struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
763 bdaddr_t *bdaddr)
1da177e4 764{
30883512 765 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
766 struct inquiry_entry *e;
767
6ed93dc6 768 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 769
561aafbc
JH
770 list_for_each_entry(e, &cache->all, all) {
771 if (!bacmp(&e->data.bdaddr, bdaddr))
772 return e;
773 }
774
775 return NULL;
776}
777
778struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 779 bdaddr_t *bdaddr)
561aafbc 780{
30883512 781 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
782 struct inquiry_entry *e;
783
6ed93dc6 784 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
785
786 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 787 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
788 return e;
789 }
790
791 return NULL;
1da177e4
LT
792}
793
30dc78e1 794struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
795 bdaddr_t *bdaddr,
796 int state)
30dc78e1
JH
797{
798 struct discovery_state *cache = &hdev->discovery;
799 struct inquiry_entry *e;
800
6ed93dc6 801 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
802
803 list_for_each_entry(e, &cache->resolve, list) {
804 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
805 return e;
806 if (!bacmp(&e->data.bdaddr, bdaddr))
807 return e;
808 }
809
810 return NULL;
811}
812
a3d4e20a 813void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 814 struct inquiry_entry *ie)
a3d4e20a
JH
815{
816 struct discovery_state *cache = &hdev->discovery;
817 struct list_head *pos = &cache->resolve;
818 struct inquiry_entry *p;
819
820 list_del(&ie->list);
821
822 list_for_each_entry(p, &cache->resolve, list) {
823 if (p->name_state != NAME_PENDING &&
a8c5fb1a 824 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
825 break;
826 pos = &p->list;
827 }
828
829 list_add(&ie->list, pos);
830}
831
3175405b 832bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 833 bool name_known, bool *ssp)
1da177e4 834{
30883512 835 struct discovery_state *cache = &hdev->discovery;
70f23020 836 struct inquiry_entry *ie;
1da177e4 837
6ed93dc6 838 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 839
2b2fec4d
SJ
840 hci_remove_remote_oob_data(hdev, &data->bdaddr);
841
388fc8fa
JH
842 if (ssp)
843 *ssp = data->ssp_mode;
844
70f23020 845 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 846 if (ie) {
388fc8fa
JH
847 if (ie->data.ssp_mode && ssp)
848 *ssp = true;
849
a3d4e20a 850 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 851 data->rssi != ie->data.rssi) {
a3d4e20a
JH
852 ie->data.rssi = data->rssi;
853 hci_inquiry_cache_update_resolve(hdev, ie);
854 }
855
561aafbc 856 goto update;
a3d4e20a 857 }
561aafbc
JH
858
859 /* Entry not in the cache. Add new one. */
860 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
861 if (!ie)
3175405b 862 return false;
561aafbc
JH
863
864 list_add(&ie->all, &cache->all);
865
866 if (name_known) {
867 ie->name_state = NAME_KNOWN;
868 } else {
869 ie->name_state = NAME_NOT_KNOWN;
870 list_add(&ie->list, &cache->unknown);
871 }
70f23020 872
561aafbc
JH
873update:
874 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 875 ie->name_state != NAME_PENDING) {
561aafbc
JH
876 ie->name_state = NAME_KNOWN;
877 list_del(&ie->list);
1da177e4
LT
878 }
879
70f23020
AE
880 memcpy(&ie->data, data, sizeof(*data));
881 ie->timestamp = jiffies;
1da177e4 882 cache->timestamp = jiffies;
3175405b
JH
883
884 if (ie->name_state == NAME_NOT_KNOWN)
885 return false;
886
887 return true;
1da177e4
LT
888}
889
890static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
891{
30883512 892 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
893 struct inquiry_info *info = (struct inquiry_info *) buf;
894 struct inquiry_entry *e;
895 int copied = 0;
896
561aafbc 897 list_for_each_entry(e, &cache->all, all) {
1da177e4 898 struct inquiry_data *data = &e->data;
b57c1a56
JH
899
900 if (copied >= num)
901 break;
902
1da177e4
LT
903 bacpy(&info->bdaddr, &data->bdaddr);
904 info->pscan_rep_mode = data->pscan_rep_mode;
905 info->pscan_period_mode = data->pscan_period_mode;
906 info->pscan_mode = data->pscan_mode;
907 memcpy(info->dev_class, data->dev_class, 3);
908 info->clock_offset = data->clock_offset;
b57c1a56 909
1da177e4 910 info++;
b57c1a56 911 copied++;
1da177e4
LT
912 }
913
914 BT_DBG("cache %p, copied %d", cache, copied);
915 return copied;
916}
917
42c6b129 918static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
919{
920 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 921 struct hci_dev *hdev = req->hdev;
1da177e4
LT
922 struct hci_cp_inquiry cp;
923
924 BT_DBG("%s", hdev->name);
925
926 if (test_bit(HCI_INQUIRY, &hdev->flags))
927 return;
928
929 /* Start Inquiry */
930 memcpy(&cp.lap, &ir->lap, 3);
931 cp.length = ir->length;
932 cp.num_rsp = ir->num_rsp;
42c6b129 933 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
934}
935
3e13fa1e
AG
936static int wait_inquiry(void *word)
937{
938 schedule();
939 return signal_pending(current);
940}
941
1da177e4
LT
942int hci_inquiry(void __user *arg)
943{
944 __u8 __user *ptr = arg;
945 struct hci_inquiry_req ir;
946 struct hci_dev *hdev;
947 int err = 0, do_inquiry = 0, max_rsp;
948 long timeo;
949 __u8 *buf;
950
951 if (copy_from_user(&ir, ptr, sizeof(ir)))
952 return -EFAULT;
953
5a08ecce
AE
954 hdev = hci_dev_get(ir.dev_id);
955 if (!hdev)
1da177e4
LT
956 return -ENODEV;
957
09fd0de5 958 hci_dev_lock(hdev);
8e87d142 959 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 960 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
961 inquiry_cache_flush(hdev);
962 do_inquiry = 1;
963 }
09fd0de5 964 hci_dev_unlock(hdev);
1da177e4 965
04837f64 966 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
967
968 if (do_inquiry) {
01178cd4
JH
969 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
970 timeo);
70f23020
AE
971 if (err < 0)
972 goto done;
3e13fa1e
AG
973
974 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
975 * cleared). If it is interrupted by a signal, return -EINTR.
976 */
977 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
978 TASK_INTERRUPTIBLE))
979 return -EINTR;
70f23020 980 }
1da177e4 981
8fc9ced3
GP
982 /* for unlimited number of responses we will use buffer with
983 * 255 entries
984 */
1da177e4
LT
985 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
986
987 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
988 * copy it to the user space.
989 */
01df8c31 990 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 991 if (!buf) {
1da177e4
LT
992 err = -ENOMEM;
993 goto done;
994 }
995
09fd0de5 996 hci_dev_lock(hdev);
1da177e4 997 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 998 hci_dev_unlock(hdev);
1da177e4
LT
999
1000 BT_DBG("num_rsp %d", ir.num_rsp);
1001
1002 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1003 ptr += sizeof(ir);
1004 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1005 ir.num_rsp))
1da177e4 1006 err = -EFAULT;
8e87d142 1007 } else
1da177e4
LT
1008 err = -EFAULT;
1009
1010 kfree(buf);
1011
1012done:
1013 hci_dev_put(hdev);
1014 return err;
1015}
1016
3f0f524b
JH
1017static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1018{
1019 u8 ad_len = 0, flags = 0;
1020 size_t name_len;
1021
1022 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1023 flags |= LE_AD_GENERAL;
1024
1025 if (!lmp_bredr_capable(hdev))
1026 flags |= LE_AD_NO_BREDR;
1027
1028 if (lmp_le_br_capable(hdev))
1029 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1030
1031 if (lmp_host_le_br_capable(hdev))
1032 flags |= LE_AD_SIM_LE_BREDR_HOST;
1033
1034 if (flags) {
1035 BT_DBG("adv flags 0x%02x", flags);
1036
1037 ptr[0] = 2;
1038 ptr[1] = EIR_FLAGS;
1039 ptr[2] = flags;
1040
1041 ad_len += 3;
1042 ptr += 3;
1043 }
1044
1045 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1046 ptr[0] = 2;
1047 ptr[1] = EIR_TX_POWER;
1048 ptr[2] = (u8) hdev->adv_tx_power;
1049
1050 ad_len += 3;
1051 ptr += 3;
1052 }
1053
1054 name_len = strlen(hdev->dev_name);
1055 if (name_len > 0) {
1056 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1057
1058 if (name_len > max_len) {
1059 name_len = max_len;
1060 ptr[1] = EIR_NAME_SHORT;
1061 } else
1062 ptr[1] = EIR_NAME_COMPLETE;
1063
1064 ptr[0] = name_len + 1;
1065
1066 memcpy(ptr + 2, hdev->dev_name, name_len);
1067
1068 ad_len += (name_len + 2);
1069 ptr += (name_len + 2);
1070 }
1071
1072 return ad_len;
1073}
1074
04b4edcb 1075void hci_update_ad(struct hci_request *req)
3f0f524b 1076{
04b4edcb 1077 struct hci_dev *hdev = req->hdev;
3f0f524b
JH
1078 struct hci_cp_le_set_adv_data cp;
1079 u8 len;
3f0f524b 1080
04b4edcb
JH
1081 if (!lmp_le_capable(hdev))
1082 return;
3f0f524b
JH
1083
1084 memset(&cp, 0, sizeof(cp));
1085
1086 len = create_ad(hdev, cp.data);
1087
1088 if (hdev->adv_data_len == len &&
04b4edcb
JH
1089 memcmp(cp.data, hdev->adv_data, len) == 0)
1090 return;
3f0f524b
JH
1091
1092 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1093 hdev->adv_data_len = len;
1094
1095 cp.length = len;
3f0f524b 1096
04b4edcb 1097 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
3f0f524b
JH
1098}
1099
1da177e4
LT
1100/* ---- HCI ioctl helpers ---- */
1101
1102int hci_dev_open(__u16 dev)
1103{
1104 struct hci_dev *hdev;
1105 int ret = 0;
1106
5a08ecce
AE
1107 hdev = hci_dev_get(dev);
1108 if (!hdev)
1da177e4
LT
1109 return -ENODEV;
1110
1111 BT_DBG("%s %p", hdev->name, hdev);
1112
1113 hci_req_lock(hdev);
1114
94324962
JH
1115 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1116 ret = -ENODEV;
1117 goto done;
1118 }
1119
611b30f7
MH
1120 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1121 ret = -ERFKILL;
1122 goto done;
1123 }
1124
1da177e4
LT
1125 if (test_bit(HCI_UP, &hdev->flags)) {
1126 ret = -EALREADY;
1127 goto done;
1128 }
1129
1130 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1131 set_bit(HCI_RAW, &hdev->flags);
1132
07e3b94a
AE
1133 /* Treat all non BR/EDR controllers as raw devices if
1134 enable_hs is not set */
1135 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
1136 set_bit(HCI_RAW, &hdev->flags);
1137
1da177e4
LT
1138 if (hdev->open(hdev)) {
1139 ret = -EIO;
1140 goto done;
1141 }
1142
1143 if (!test_bit(HCI_RAW, &hdev->flags)) {
1144 atomic_set(&hdev->cmd_cnt, 1);
1145 set_bit(HCI_INIT, &hdev->flags);
2177bab5 1146 ret = __hci_init(hdev);
1da177e4
LT
1147 clear_bit(HCI_INIT, &hdev->flags);
1148 }
1149
1150 if (!ret) {
1151 hci_dev_hold(hdev);
1152 set_bit(HCI_UP, &hdev->flags);
1153 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a
AE
1154 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1155 mgmt_valid_hdev(hdev)) {
09fd0de5 1156 hci_dev_lock(hdev);
744cf19e 1157 mgmt_powered(hdev, 1);
09fd0de5 1158 hci_dev_unlock(hdev);
56e5cb86 1159 }
8e87d142 1160 } else {
1da177e4 1161 /* Init failed, cleanup */
3eff45ea 1162 flush_work(&hdev->tx_work);
c347b765 1163 flush_work(&hdev->cmd_work);
b78752cc 1164 flush_work(&hdev->rx_work);
1da177e4
LT
1165
1166 skb_queue_purge(&hdev->cmd_q);
1167 skb_queue_purge(&hdev->rx_q);
1168
1169 if (hdev->flush)
1170 hdev->flush(hdev);
1171
1172 if (hdev->sent_cmd) {
1173 kfree_skb(hdev->sent_cmd);
1174 hdev->sent_cmd = NULL;
1175 }
1176
1177 hdev->close(hdev);
1178 hdev->flags = 0;
1179 }
1180
1181done:
1182 hci_req_unlock(hdev);
1183 hci_dev_put(hdev);
1184 return ret;
1185}
1186
1187static int hci_dev_do_close(struct hci_dev *hdev)
1188{
1189 BT_DBG("%s %p", hdev->name, hdev);
1190
28b75a89
AG
1191 cancel_work_sync(&hdev->le_scan);
1192
78c04c0b
VCG
1193 cancel_delayed_work(&hdev->power_off);
1194
1da177e4
LT
1195 hci_req_cancel(hdev, ENODEV);
1196 hci_req_lock(hdev);
1197
1198 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 1199 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
1200 hci_req_unlock(hdev);
1201 return 0;
1202 }
1203
3eff45ea
GP
1204 /* Flush RX and TX works */
1205 flush_work(&hdev->tx_work);
b78752cc 1206 flush_work(&hdev->rx_work);
1da177e4 1207
16ab91ab 1208 if (hdev->discov_timeout > 0) {
e0f9309f 1209 cancel_delayed_work(&hdev->discov_off);
16ab91ab 1210 hdev->discov_timeout = 0;
5e5282bb 1211 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
1212 }
1213
a8b2d5c2 1214 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
1215 cancel_delayed_work(&hdev->service_cache);
1216
7ba8b4be
AG
1217 cancel_delayed_work_sync(&hdev->le_scan_disable);
1218
09fd0de5 1219 hci_dev_lock(hdev);
1da177e4
LT
1220 inquiry_cache_flush(hdev);
1221 hci_conn_hash_flush(hdev);
09fd0de5 1222 hci_dev_unlock(hdev);
1da177e4
LT
1223
1224 hci_notify(hdev, HCI_DEV_DOWN);
1225
1226 if (hdev->flush)
1227 hdev->flush(hdev);
1228
1229 /* Reset device */
1230 skb_queue_purge(&hdev->cmd_q);
1231 atomic_set(&hdev->cmd_cnt, 1);
8af59467 1232 if (!test_bit(HCI_RAW, &hdev->flags) &&
a6c511c6 1233 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 1234 set_bit(HCI_INIT, &hdev->flags);
01178cd4 1235 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
1236 clear_bit(HCI_INIT, &hdev->flags);
1237 }
1238
c347b765
GP
1239 /* flush cmd work */
1240 flush_work(&hdev->cmd_work);
1da177e4
LT
1241
1242 /* Drop queues */
1243 skb_queue_purge(&hdev->rx_q);
1244 skb_queue_purge(&hdev->cmd_q);
1245 skb_queue_purge(&hdev->raw_q);
1246
1247 /* Drop last sent command */
1248 if (hdev->sent_cmd) {
b79f44c1 1249 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
1250 kfree_skb(hdev->sent_cmd);
1251 hdev->sent_cmd = NULL;
1252 }
1253
b6ddb638
JH
1254 kfree_skb(hdev->recv_evt);
1255 hdev->recv_evt = NULL;
1256
1da177e4
LT
1257 /* After this point our queues are empty
1258 * and no tasks are scheduled. */
1259 hdev->close(hdev);
1260
35b973c9
JH
1261 /* Clear flags */
1262 hdev->flags = 0;
1263 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1264
bb4b2a9a
AE
1265 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1266 mgmt_valid_hdev(hdev)) {
8ee56540
MH
1267 hci_dev_lock(hdev);
1268 mgmt_powered(hdev, 0);
1269 hci_dev_unlock(hdev);
1270 }
5add6af8 1271
ced5c338
AE
1272 /* Controller radio is available but is currently powered down */
1273 hdev->amp_status = 0;
1274
e59fda8d 1275 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 1276 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 1277
1da177e4
LT
1278 hci_req_unlock(hdev);
1279
1280 hci_dev_put(hdev);
1281 return 0;
1282}
1283
1284int hci_dev_close(__u16 dev)
1285{
1286 struct hci_dev *hdev;
1287 int err;
1288
70f23020
AE
1289 hdev = hci_dev_get(dev);
1290 if (!hdev)
1da177e4 1291 return -ENODEV;
8ee56540
MH
1292
1293 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1294 cancel_delayed_work(&hdev->power_off);
1295
1da177e4 1296 err = hci_dev_do_close(hdev);
8ee56540 1297
1da177e4
LT
1298 hci_dev_put(hdev);
1299 return err;
1300}
1301
1302int hci_dev_reset(__u16 dev)
1303{
1304 struct hci_dev *hdev;
1305 int ret = 0;
1306
70f23020
AE
1307 hdev = hci_dev_get(dev);
1308 if (!hdev)
1da177e4
LT
1309 return -ENODEV;
1310
1311 hci_req_lock(hdev);
1da177e4
LT
1312
1313 if (!test_bit(HCI_UP, &hdev->flags))
1314 goto done;
1315
1316 /* Drop queues */
1317 skb_queue_purge(&hdev->rx_q);
1318 skb_queue_purge(&hdev->cmd_q);
1319
09fd0de5 1320 hci_dev_lock(hdev);
1da177e4
LT
1321 inquiry_cache_flush(hdev);
1322 hci_conn_hash_flush(hdev);
09fd0de5 1323 hci_dev_unlock(hdev);
1da177e4
LT
1324
1325 if (hdev->flush)
1326 hdev->flush(hdev);
1327
8e87d142 1328 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1329 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
1330
1331 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 1332 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
1333
1334done:
1da177e4
LT
1335 hci_req_unlock(hdev);
1336 hci_dev_put(hdev);
1337 return ret;
1338}
1339
1340int hci_dev_reset_stat(__u16 dev)
1341{
1342 struct hci_dev *hdev;
1343 int ret = 0;
1344
70f23020
AE
1345 hdev = hci_dev_get(dev);
1346 if (!hdev)
1da177e4
LT
1347 return -ENODEV;
1348
1349 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1350
1351 hci_dev_put(hdev);
1352
1353 return ret;
1354}
1355
1356int hci_dev_cmd(unsigned int cmd, void __user *arg)
1357{
1358 struct hci_dev *hdev;
1359 struct hci_dev_req dr;
1360 int err = 0;
1361
1362 if (copy_from_user(&dr, arg, sizeof(dr)))
1363 return -EFAULT;
1364
70f23020
AE
1365 hdev = hci_dev_get(dr.dev_id);
1366 if (!hdev)
1da177e4
LT
1367 return -ENODEV;
1368
1369 switch (cmd) {
1370 case HCISETAUTH:
01178cd4
JH
1371 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1372 HCI_INIT_TIMEOUT);
1da177e4
LT
1373 break;
1374
1375 case HCISETENCRYPT:
1376 if (!lmp_encrypt_capable(hdev)) {
1377 err = -EOPNOTSUPP;
1378 break;
1379 }
1380
1381 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1382 /* Auth must be enabled first */
01178cd4
JH
1383 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1384 HCI_INIT_TIMEOUT);
1da177e4
LT
1385 if (err)
1386 break;
1387 }
1388
01178cd4
JH
1389 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1390 HCI_INIT_TIMEOUT);
1da177e4
LT
1391 break;
1392
1393 case HCISETSCAN:
01178cd4
JH
1394 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1395 HCI_INIT_TIMEOUT);
1da177e4
LT
1396 break;
1397
1da177e4 1398 case HCISETLINKPOL:
01178cd4
JH
1399 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1400 HCI_INIT_TIMEOUT);
1da177e4
LT
1401 break;
1402
1403 case HCISETLINKMODE:
e4e8e37c
MH
1404 hdev->link_mode = ((__u16) dr.dev_opt) &
1405 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1406 break;
1407
1408 case HCISETPTYPE:
1409 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1410 break;
1411
1412 case HCISETACLMTU:
e4e8e37c
MH
1413 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1414 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1415 break;
1416
1417 case HCISETSCOMTU:
e4e8e37c
MH
1418 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1419 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1420 break;
1421
1422 default:
1423 err = -EINVAL;
1424 break;
1425 }
e4e8e37c 1426
1da177e4
LT
1427 hci_dev_put(hdev);
1428 return err;
1429}
1430
1431int hci_get_dev_list(void __user *arg)
1432{
8035ded4 1433 struct hci_dev *hdev;
1da177e4
LT
1434 struct hci_dev_list_req *dl;
1435 struct hci_dev_req *dr;
1da177e4
LT
1436 int n = 0, size, err;
1437 __u16 dev_num;
1438
1439 if (get_user(dev_num, (__u16 __user *) arg))
1440 return -EFAULT;
1441
1442 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1443 return -EINVAL;
1444
1445 size = sizeof(*dl) + dev_num * sizeof(*dr);
1446
70f23020
AE
1447 dl = kzalloc(size, GFP_KERNEL);
1448 if (!dl)
1da177e4
LT
1449 return -ENOMEM;
1450
1451 dr = dl->dev_req;
1452
f20d09d5 1453 read_lock(&hci_dev_list_lock);
8035ded4 1454 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1455 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1456 cancel_delayed_work(&hdev->power_off);
c542a06c 1457
a8b2d5c2
JH
1458 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1459 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1460
1da177e4
LT
1461 (dr + n)->dev_id = hdev->id;
1462 (dr + n)->dev_opt = hdev->flags;
c542a06c 1463
1da177e4
LT
1464 if (++n >= dev_num)
1465 break;
1466 }
f20d09d5 1467 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1468
1469 dl->dev_num = n;
1470 size = sizeof(*dl) + n * sizeof(*dr);
1471
1472 err = copy_to_user(arg, dl, size);
1473 kfree(dl);
1474
1475 return err ? -EFAULT : 0;
1476}
1477
1478int hci_get_dev_info(void __user *arg)
1479{
1480 struct hci_dev *hdev;
1481 struct hci_dev_info di;
1482 int err = 0;
1483
1484 if (copy_from_user(&di, arg, sizeof(di)))
1485 return -EFAULT;
1486
70f23020
AE
1487 hdev = hci_dev_get(di.dev_id);
1488 if (!hdev)
1da177e4
LT
1489 return -ENODEV;
1490
a8b2d5c2 1491 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1492 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1493
a8b2d5c2
JH
1494 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1495 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1496
1da177e4
LT
1497 strcpy(di.name, hdev->name);
1498 di.bdaddr = hdev->bdaddr;
943da25d 1499 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1500 di.flags = hdev->flags;
1501 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1502 if (lmp_bredr_capable(hdev)) {
1503 di.acl_mtu = hdev->acl_mtu;
1504 di.acl_pkts = hdev->acl_pkts;
1505 di.sco_mtu = hdev->sco_mtu;
1506 di.sco_pkts = hdev->sco_pkts;
1507 } else {
1508 di.acl_mtu = hdev->le_mtu;
1509 di.acl_pkts = hdev->le_pkts;
1510 di.sco_mtu = 0;
1511 di.sco_pkts = 0;
1512 }
1da177e4
LT
1513 di.link_policy = hdev->link_policy;
1514 di.link_mode = hdev->link_mode;
1515
1516 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1517 memcpy(&di.features, &hdev->features, sizeof(di.features));
1518
1519 if (copy_to_user(arg, &di, sizeof(di)))
1520 err = -EFAULT;
1521
1522 hci_dev_put(hdev);
1523
1524 return err;
1525}
1526
1527/* ---- Interface to HCI drivers ---- */
1528
611b30f7
MH
1529static int hci_rfkill_set_block(void *data, bool blocked)
1530{
1531 struct hci_dev *hdev = data;
1532
1533 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1534
1535 if (!blocked)
1536 return 0;
1537
1538 hci_dev_do_close(hdev);
1539
1540 return 0;
1541}
1542
1543static const struct rfkill_ops hci_rfkill_ops = {
1544 .set_block = hci_rfkill_set_block,
1545};
1546
ab81cbf9
JH
1547static void hci_power_on(struct work_struct *work)
1548{
1549 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1550
1551 BT_DBG("%s", hdev->name);
1552
1553 if (hci_dev_open(hdev->id) < 0)
1554 return;
1555
a8b2d5c2 1556 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
19202573
JH
1557 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1558 HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1559
a8b2d5c2 1560 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1561 mgmt_index_added(hdev);
ab81cbf9
JH
1562}
1563
1564static void hci_power_off(struct work_struct *work)
1565{
3243553f 1566 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1567 power_off.work);
ab81cbf9
JH
1568
1569 BT_DBG("%s", hdev->name);
1570
8ee56540 1571 hci_dev_do_close(hdev);
ab81cbf9
JH
1572}
1573
16ab91ab
JH
1574static void hci_discov_off(struct work_struct *work)
1575{
1576 struct hci_dev *hdev;
1577 u8 scan = SCAN_PAGE;
1578
1579 hdev = container_of(work, struct hci_dev, discov_off.work);
1580
1581 BT_DBG("%s", hdev->name);
1582
09fd0de5 1583 hci_dev_lock(hdev);
16ab91ab
JH
1584
1585 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1586
1587 hdev->discov_timeout = 0;
1588
09fd0de5 1589 hci_dev_unlock(hdev);
16ab91ab
JH
1590}
1591
2aeb9a1a
JH
1592int hci_uuids_clear(struct hci_dev *hdev)
1593{
4821002c 1594 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1595
4821002c
JH
1596 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1597 list_del(&uuid->list);
2aeb9a1a
JH
1598 kfree(uuid);
1599 }
1600
1601 return 0;
1602}
1603
55ed8ca1
JH
1604int hci_link_keys_clear(struct hci_dev *hdev)
1605{
1606 struct list_head *p, *n;
1607
1608 list_for_each_safe(p, n, &hdev->link_keys) {
1609 struct link_key *key;
1610
1611 key = list_entry(p, struct link_key, list);
1612
1613 list_del(p);
1614 kfree(key);
1615 }
1616
1617 return 0;
1618}
1619
b899efaf
VCG
1620int hci_smp_ltks_clear(struct hci_dev *hdev)
1621{
1622 struct smp_ltk *k, *tmp;
1623
1624 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1625 list_del(&k->list);
1626 kfree(k);
1627 }
1628
1629 return 0;
1630}
1631
55ed8ca1
JH
1632struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1633{
8035ded4 1634 struct link_key *k;
55ed8ca1 1635
8035ded4 1636 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1637 if (bacmp(bdaddr, &k->bdaddr) == 0)
1638 return k;
55ed8ca1
JH
1639
1640 return NULL;
1641}
1642
745c0ce3 1643static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1644 u8 key_type, u8 old_key_type)
d25e28ab
JH
1645{
1646 /* Legacy key */
1647 if (key_type < 0x03)
745c0ce3 1648 return true;
d25e28ab
JH
1649
1650 /* Debug keys are insecure so don't store them persistently */
1651 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1652 return false;
d25e28ab
JH
1653
1654 /* Changed combination key and there's no previous one */
1655 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1656 return false;
d25e28ab
JH
1657
1658 /* Security mode 3 case */
1659 if (!conn)
745c0ce3 1660 return true;
d25e28ab
JH
1661
1662 /* Neither local nor remote side had no-bonding as requirement */
1663 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1664 return true;
d25e28ab
JH
1665
1666 /* Local side had dedicated bonding as requirement */
1667 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1668 return true;
d25e28ab
JH
1669
1670 /* Remote side had dedicated bonding as requirement */
1671 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1672 return true;
d25e28ab
JH
1673
1674 /* If none of the above criteria match, then don't store the key
1675 * persistently */
745c0ce3 1676 return false;
d25e28ab
JH
1677}
1678
c9839a11 1679struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1680{
c9839a11 1681 struct smp_ltk *k;
75d262c2 1682
c9839a11
VCG
1683 list_for_each_entry(k, &hdev->long_term_keys, list) {
1684 if (k->ediv != ediv ||
a8c5fb1a 1685 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1686 continue;
1687
c9839a11 1688 return k;
75d262c2
VCG
1689 }
1690
1691 return NULL;
1692}
75d262c2 1693
c9839a11 1694struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1695 u8 addr_type)
75d262c2 1696{
c9839a11 1697 struct smp_ltk *k;
75d262c2 1698
c9839a11
VCG
1699 list_for_each_entry(k, &hdev->long_term_keys, list)
1700 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1701 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1702 return k;
1703
1704 return NULL;
1705}
75d262c2 1706
d25e28ab 1707int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1708 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1709{
1710 struct link_key *key, *old_key;
745c0ce3
VA
1711 u8 old_key_type;
1712 bool persistent;
55ed8ca1
JH
1713
1714 old_key = hci_find_link_key(hdev, bdaddr);
1715 if (old_key) {
1716 old_key_type = old_key->type;
1717 key = old_key;
1718 } else {
12adcf3a 1719 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1720 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1721 if (!key)
1722 return -ENOMEM;
1723 list_add(&key->list, &hdev->link_keys);
1724 }
1725
6ed93dc6 1726 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1727
d25e28ab
JH
1728 /* Some buggy controller combinations generate a changed
1729 * combination key for legacy pairing even when there's no
1730 * previous key */
1731 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1732 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1733 type = HCI_LK_COMBINATION;
655fe6ec
JH
1734 if (conn)
1735 conn->key_type = type;
1736 }
d25e28ab 1737
55ed8ca1 1738 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1739 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1740 key->pin_len = pin_len;
1741
b6020ba0 1742 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1743 key->type = old_key_type;
4748fed2
JH
1744 else
1745 key->type = type;
1746
4df378a1
JH
1747 if (!new_key)
1748 return 0;
1749
1750 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1751
744cf19e 1752 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1753
6ec5bcad
VA
1754 if (conn)
1755 conn->flush_key = !persistent;
55ed8ca1
JH
1756
1757 return 0;
1758}
1759
c9839a11 1760int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1761 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1762 ediv, u8 rand[8])
75d262c2 1763{
c9839a11 1764 struct smp_ltk *key, *old_key;
75d262c2 1765
c9839a11
VCG
1766 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1767 return 0;
75d262c2 1768
c9839a11
VCG
1769 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1770 if (old_key)
75d262c2 1771 key = old_key;
c9839a11
VCG
1772 else {
1773 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1774 if (!key)
1775 return -ENOMEM;
c9839a11 1776 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1777 }
1778
75d262c2 1779 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1780 key->bdaddr_type = addr_type;
1781 memcpy(key->val, tk, sizeof(key->val));
1782 key->authenticated = authenticated;
1783 key->ediv = ediv;
1784 key->enc_size = enc_size;
1785 key->type = type;
1786 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1787
c9839a11
VCG
1788 if (!new_key)
1789 return 0;
75d262c2 1790
261cc5aa
VCG
1791 if (type & HCI_SMP_LTK)
1792 mgmt_new_ltk(hdev, key, 1);
1793
75d262c2
VCG
1794 return 0;
1795}
1796
55ed8ca1
JH
1797int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1798{
1799 struct link_key *key;
1800
1801 key = hci_find_link_key(hdev, bdaddr);
1802 if (!key)
1803 return -ENOENT;
1804
6ed93dc6 1805 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1806
1807 list_del(&key->list);
1808 kfree(key);
1809
1810 return 0;
1811}
1812
b899efaf
VCG
1813int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1814{
1815 struct smp_ltk *k, *tmp;
1816
1817 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1818 if (bacmp(bdaddr, &k->bdaddr))
1819 continue;
1820
6ed93dc6 1821 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1822
1823 list_del(&k->list);
1824 kfree(k);
1825 }
1826
1827 return 0;
1828}
1829
6bd32326 1830/* HCI command timer function */
bda4f23a 1831static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1832{
1833 struct hci_dev *hdev = (void *) arg;
1834
bda4f23a
AE
1835 if (hdev->sent_cmd) {
1836 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1837 u16 opcode = __le16_to_cpu(sent->opcode);
1838
1839 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1840 } else {
1841 BT_ERR("%s command tx timeout", hdev->name);
1842 }
1843
6bd32326 1844 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1845 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1846}
1847
2763eda6 1848struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1849 bdaddr_t *bdaddr)
2763eda6
SJ
1850{
1851 struct oob_data *data;
1852
1853 list_for_each_entry(data, &hdev->remote_oob_data, list)
1854 if (bacmp(bdaddr, &data->bdaddr) == 0)
1855 return data;
1856
1857 return NULL;
1858}
1859
1860int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1861{
1862 struct oob_data *data;
1863
1864 data = hci_find_remote_oob_data(hdev, bdaddr);
1865 if (!data)
1866 return -ENOENT;
1867
6ed93dc6 1868 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1869
1870 list_del(&data->list);
1871 kfree(data);
1872
1873 return 0;
1874}
1875
1876int hci_remote_oob_data_clear(struct hci_dev *hdev)
1877{
1878 struct oob_data *data, *n;
1879
1880 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1881 list_del(&data->list);
1882 kfree(data);
1883 }
1884
1885 return 0;
1886}
1887
1888int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1889 u8 *randomizer)
2763eda6
SJ
1890{
1891 struct oob_data *data;
1892
1893 data = hci_find_remote_oob_data(hdev, bdaddr);
1894
1895 if (!data) {
1896 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1897 if (!data)
1898 return -ENOMEM;
1899
1900 bacpy(&data->bdaddr, bdaddr);
1901 list_add(&data->list, &hdev->remote_oob_data);
1902 }
1903
1904 memcpy(data->hash, hash, sizeof(data->hash));
1905 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1906
6ed93dc6 1907 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1908
1909 return 0;
1910}
1911
04124681 1912struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1913{
8035ded4 1914 struct bdaddr_list *b;
b2a66aad 1915
8035ded4 1916 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1917 if (bacmp(bdaddr, &b->bdaddr) == 0)
1918 return b;
b2a66aad
AJ
1919
1920 return NULL;
1921}
1922
1923int hci_blacklist_clear(struct hci_dev *hdev)
1924{
1925 struct list_head *p, *n;
1926
1927 list_for_each_safe(p, n, &hdev->blacklist) {
1928 struct bdaddr_list *b;
1929
1930 b = list_entry(p, struct bdaddr_list, list);
1931
1932 list_del(p);
1933 kfree(b);
1934 }
1935
1936 return 0;
1937}
1938
88c1fe4b 1939int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1940{
1941 struct bdaddr_list *entry;
b2a66aad
AJ
1942
1943 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1944 return -EBADF;
1945
5e762444
AJ
1946 if (hci_blacklist_lookup(hdev, bdaddr))
1947 return -EEXIST;
b2a66aad
AJ
1948
1949 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1950 if (!entry)
1951 return -ENOMEM;
b2a66aad
AJ
1952
1953 bacpy(&entry->bdaddr, bdaddr);
1954
1955 list_add(&entry->list, &hdev->blacklist);
1956
88c1fe4b 1957 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1958}
1959
88c1fe4b 1960int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1961{
1962 struct bdaddr_list *entry;
b2a66aad 1963
1ec918ce 1964 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1965 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1966
1967 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1968 if (!entry)
5e762444 1969 return -ENOENT;
b2a66aad
AJ
1970
1971 list_del(&entry->list);
1972 kfree(entry);
1973
88c1fe4b 1974 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1975}
1976
42c6b129 1977static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1978{
1979 struct le_scan_params *param = (struct le_scan_params *) opt;
1980 struct hci_cp_le_set_scan_param cp;
1981
1982 memset(&cp, 0, sizeof(cp));
1983 cp.type = param->type;
1984 cp.interval = cpu_to_le16(param->interval);
1985 cp.window = cpu_to_le16(param->window);
1986
42c6b129 1987 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1988}
1989
42c6b129 1990static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1991{
1992 struct hci_cp_le_set_scan_enable cp;
1993
1994 memset(&cp, 0, sizeof(cp));
1995 cp.enable = 1;
0431a43c 1996 cp.filter_dup = 1;
7ba8b4be 1997
42c6b129 1998 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1999}
2000
2001static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 2002 u16 window, int timeout)
7ba8b4be
AG
2003{
2004 long timeo = msecs_to_jiffies(3000);
2005 struct le_scan_params param;
2006 int err;
2007
2008 BT_DBG("%s", hdev->name);
2009
2010 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2011 return -EINPROGRESS;
2012
2013 param.type = type;
2014 param.interval = interval;
2015 param.window = window;
2016
2017 hci_req_lock(hdev);
2018
01178cd4
JH
2019 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2020 timeo);
7ba8b4be 2021 if (!err)
01178cd4 2022 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
7ba8b4be
AG
2023
2024 hci_req_unlock(hdev);
2025
2026 if (err < 0)
2027 return err;
2028
46818ed5
JH
2029 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2030 msecs_to_jiffies(timeout));
7ba8b4be
AG
2031
2032 return 0;
2033}
2034
7dbfac1d
AG
2035int hci_cancel_le_scan(struct hci_dev *hdev)
2036{
2037 BT_DBG("%s", hdev->name);
2038
2039 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2040 return -EALREADY;
2041
2042 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2043 struct hci_cp_le_set_scan_enable cp;
2044
2045 /* Send HCI command to disable LE Scan */
2046 memset(&cp, 0, sizeof(cp));
2047 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2048 }
2049
2050 return 0;
2051}
2052
7ba8b4be
AG
2053static void le_scan_disable_work(struct work_struct *work)
2054{
2055 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2056 le_scan_disable.work);
7ba8b4be
AG
2057 struct hci_cp_le_set_scan_enable cp;
2058
2059 BT_DBG("%s", hdev->name);
2060
2061 memset(&cp, 0, sizeof(cp));
2062
2063 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2064}
2065
28b75a89
AG
2066static void le_scan_work(struct work_struct *work)
2067{
2068 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2069 struct le_scan_params *param = &hdev->le_scan_params;
2070
2071 BT_DBG("%s", hdev->name);
2072
04124681
GP
2073 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2074 param->timeout);
28b75a89
AG
2075}
2076
2077int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 2078 int timeout)
28b75a89
AG
2079{
2080 struct le_scan_params *param = &hdev->le_scan_params;
2081
2082 BT_DBG("%s", hdev->name);
2083
f1550478
JH
2084 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2085 return -ENOTSUPP;
2086
28b75a89
AG
2087 if (work_busy(&hdev->le_scan))
2088 return -EINPROGRESS;
2089
2090 param->type = type;
2091 param->interval = interval;
2092 param->window = window;
2093 param->timeout = timeout;
2094
2095 queue_work(system_long_wq, &hdev->le_scan);
2096
2097 return 0;
2098}
2099
9be0dab7
DH
2100/* Alloc HCI device */
2101struct hci_dev *hci_alloc_dev(void)
2102{
2103 struct hci_dev *hdev;
2104
2105 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2106 if (!hdev)
2107 return NULL;
2108
b1b813d4
DH
2109 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2110 hdev->esco_type = (ESCO_HV1);
2111 hdev->link_mode = (HCI_LM_ACCEPT);
2112 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
2113 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2114 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 2115
b1b813d4
DH
2116 hdev->sniff_max_interval = 800;
2117 hdev->sniff_min_interval = 80;
2118
2119 mutex_init(&hdev->lock);
2120 mutex_init(&hdev->req_lock);
2121
2122 INIT_LIST_HEAD(&hdev->mgmt_pending);
2123 INIT_LIST_HEAD(&hdev->blacklist);
2124 INIT_LIST_HEAD(&hdev->uuids);
2125 INIT_LIST_HEAD(&hdev->link_keys);
2126 INIT_LIST_HEAD(&hdev->long_term_keys);
2127 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 2128 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
2129
2130 INIT_WORK(&hdev->rx_work, hci_rx_work);
2131 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2132 INIT_WORK(&hdev->tx_work, hci_tx_work);
2133 INIT_WORK(&hdev->power_on, hci_power_on);
2134 INIT_WORK(&hdev->le_scan, le_scan_work);
2135
b1b813d4
DH
2136 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2137 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2138 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2139
9be0dab7 2140 skb_queue_head_init(&hdev->driver_init);
b1b813d4
DH
2141 skb_queue_head_init(&hdev->rx_q);
2142 skb_queue_head_init(&hdev->cmd_q);
2143 skb_queue_head_init(&hdev->raw_q);
2144
2145 init_waitqueue_head(&hdev->req_wait_q);
2146
bda4f23a 2147 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 2148
b1b813d4
DH
2149 hci_init_sysfs(hdev);
2150 discovery_init(hdev);
9be0dab7
DH
2151
2152 return hdev;
2153}
2154EXPORT_SYMBOL(hci_alloc_dev);
2155
2156/* Free HCI device */
2157void hci_free_dev(struct hci_dev *hdev)
2158{
2159 skb_queue_purge(&hdev->driver_init);
2160
2161 /* will free via device release */
2162 put_device(&hdev->dev);
2163}
2164EXPORT_SYMBOL(hci_free_dev);
2165
1da177e4
LT
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must supply at least open() and close() callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	/* The allocated index doubles as the hciX name and hdev->id */
	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Per-device workqueue for RX/TX/command processing */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	/* Separate workqueue for synchronous HCI requests (e.g. power_on) */
	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort; failure leaves rfkill NULL */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Kick off asynchronous power-on on the request workqueue */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	/* Reached only after both workqueues were allocated */
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2254
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark early so concurrent paths can see the device is going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release persistent per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Free the index only after the last reference is dropped */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2313
/* Suspend HCI device: only notifies listeners; always succeeds */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2321
/* Resume HCI device: only notifies listeners; always succeeds */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2329
/* Receive frame from HCI drivers.
 * Takes ownership of @skb; queues it on the device RX queue and
 * schedules the RX work. Frees the skb and returns -ENXIO when the
 * device is neither up nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2352
/* Reassemble a (possibly partial) HCI packet from driver data.
 *
 * Accumulates @count bytes of @data into hdev->reassembly[@index].
 * When the header is complete the expected payload length is read from
 * it; when the whole packet is complete it is handed to hci_recv_frame()
 * and the slot is cleared.
 *
 * Returns the number of unconsumed bytes (>= 0), or a negative errno
 * (-EILSEQ for bad type/index, -ENOMEM on allocation or overflow).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the maximum frame size
		 * for the packet type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once exactly the header has arrived, read the payload
		 * length from it. A payload larger than the allocated
		 * tailroom is rejected with -ENOMEM.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2461
ef222013
MH
2462int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2463{
f39a3c06
SS
2464 int rem = 0;
2465
ef222013
MH
2466 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2467 return -EILSEQ;
2468
da5f6c37 2469 while (count) {
1e429f38 2470 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2471 if (rem < 0)
2472 return rem;
ef222013 2473
f39a3c06
SS
2474 data += (count - rem);
2475 count = rem;
f81c6224 2476 }
ef222013 2477
f39a3c06 2478 return rem;
ef222013
MH
2479}
2480EXPORT_SYMBOL(hci_recv_fragment);
2481
#define STREAM_REASSEMBLY 0

/* Feed a byte stream (type byte followed by packet bytes, as used by
 * UART-style drivers) into the reassembly machinery. A single shared
 * slot (STREAM_REASSEMBLY) is used; the packet type is taken from the
 * first byte of each new packet or from the in-progress skb.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2516
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure; always returns 0 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2530
/* Unregister an upper-protocol callback structure; always returns 0 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2542
/* Hand one frame to the driver's send callback.
 * Copies go to the monitor socket and, in promiscuous mode, to raw
 * sockets. Consumes @skb (directly or via hdev->send).
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2570
/* Initialize an asynchronous HCI request context for @hdev */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2577
/* Submit a built request: splice its commands onto the device command
 * queue and schedule the command work. @complete is attached to the
 * last command and invoked when the whole request finishes.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2609
/* Build an skb carrying one HCI command (header + optional parameters).
 * Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2635
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2659
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
		    u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occured during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		/* Record the failure; hci_req_run() purges the request */
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	/* Optional event that signals completion instead of cmd_complete */
	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2690
/* Queue a command with no special completion event (event = 0) */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2695
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Only return the parameters when the opcode matches */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2713
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to @skb */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2726
/* Add ACL headers to @skb (and its fragments, if any) and append it to
 * @queue. For BR/EDR the connection handle is used; for AMP the channel
 * handle. Fragments get ACL_CONT and are queued atomically under the
 * queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, never ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2785
/* Queue ACL data on the channel's data queue and schedule TX work */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2798
2799/* Send SCO data */
0d861d8b 2800void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2801{
2802 struct hci_dev *hdev = conn->hdev;
2803 struct hci_sco_hdr hdr;
2804
2805 BT_DBG("%s len %d", hdev->name, skb->len);
2806
aca3192c 2807 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2808 hdr.dlen = skb->len;
2809
badff6d0
ACM
2810 skb_push(skb, HCI_SCO_HDR_SIZE);
2811 skb_reset_transport_header(skb);
9c70220b 2812 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2813
2814 skb->dev = (void *) hdev;
0d48d939 2815 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2816
1da177e4 2817 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2818 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2819}
1da177e4
LT
2820
2821/* ---- HCI TX task (outgoing data) ---- */
2822
2823/* HCI Connection scheduler */
6039aa73
GP
2824static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2825 int *quote)
1da177e4
LT
2826{
2827 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2828 struct hci_conn *conn = NULL, *c;
abc5de8f 2829 unsigned int num = 0, min = ~0;
1da177e4 2830
8e87d142 2831 /* We don't have to lock device here. Connections are always
1da177e4 2832 * added and removed with TX task disabled. */
bf4c6325
GP
2833
2834 rcu_read_lock();
2835
2836 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2837 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2838 continue;
769be974
MH
2839
2840 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2841 continue;
2842
1da177e4
LT
2843 num++;
2844
2845 if (c->sent < min) {
2846 min = c->sent;
2847 conn = c;
2848 }
52087a79
LAD
2849
2850 if (hci_conn_num(hdev, type) == num)
2851 break;
1da177e4
LT
2852 }
2853
bf4c6325
GP
2854 rcu_read_unlock();
2855
1da177e4 2856 if (conn) {
6ed58ec5
VT
2857 int cnt, q;
2858
2859 switch (conn->type) {
2860 case ACL_LINK:
2861 cnt = hdev->acl_cnt;
2862 break;
2863 case SCO_LINK:
2864 case ESCO_LINK:
2865 cnt = hdev->sco_cnt;
2866 break;
2867 case LE_LINK:
2868 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2869 break;
2870 default:
2871 cnt = 0;
2872 BT_ERR("Unknown link type");
2873 }
2874
2875 q = cnt / num;
1da177e4
LT
2876 *quote = q ? q : 1;
2877 } else
2878 *quote = 0;
2879
2880 BT_DBG("conn %p quote %d", conn, *quote);
2881 return conn;
2882}
2883
/* TX timeout handling: disconnect every connection of @type that still
 * has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2904
6039aa73
GP
2905static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2906 int *quote)
1da177e4 2907{
73d80deb
LAD
2908 struct hci_conn_hash *h = &hdev->conn_hash;
2909 struct hci_chan *chan = NULL;
abc5de8f 2910 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2911 struct hci_conn *conn;
73d80deb
LAD
2912 int cnt, q, conn_num = 0;
2913
2914 BT_DBG("%s", hdev->name);
2915
bf4c6325
GP
2916 rcu_read_lock();
2917
2918 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2919 struct hci_chan *tmp;
2920
2921 if (conn->type != type)
2922 continue;
2923
2924 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2925 continue;
2926
2927 conn_num++;
2928
8192edef 2929 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2930 struct sk_buff *skb;
2931
2932 if (skb_queue_empty(&tmp->data_q))
2933 continue;
2934
2935 skb = skb_peek(&tmp->data_q);
2936 if (skb->priority < cur_prio)
2937 continue;
2938
2939 if (skb->priority > cur_prio) {
2940 num = 0;
2941 min = ~0;
2942 cur_prio = skb->priority;
2943 }
2944
2945 num++;
2946
2947 if (conn->sent < min) {
2948 min = conn->sent;
2949 chan = tmp;
2950 }
2951 }
2952
2953 if (hci_conn_num(hdev, type) == conn_num)
2954 break;
2955 }
2956
bf4c6325
GP
2957 rcu_read_unlock();
2958
73d80deb
LAD
2959 if (!chan)
2960 return NULL;
2961
2962 switch (chan->conn->type) {
2963 case ACL_LINK:
2964 cnt = hdev->acl_cnt;
2965 break;
bd1eb66b
AE
2966 case AMP_LINK:
2967 cnt = hdev->block_cnt;
2968 break;
73d80deb
LAD
2969 case SCO_LINK:
2970 case ESCO_LINK:
2971 cnt = hdev->sco_cnt;
2972 break;
2973 case LE_LINK:
2974 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2975 break;
2976 default:
2977 cnt = 0;
2978 BT_ERR("Unknown link type");
2979 }
2980
2981 q = cnt / num;
2982 *quote = q ? q : 1;
2983 BT_DBG("chan %p quote %d", chan, *quote);
2984 return chan;
2985}
2986
/* Priority aging: after a scheduling round, channels that were served
 * get their 'sent' counter reset; starved channels get the priority of
 * their head skb promoted to HCI_PRIO_MAX - 1 so they win next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Served this round: reset and skip promotion */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
3036
/* Number of controller data blocks needed for this ACL packet
 * (payload only; the ACL header does not count against blocks).
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3042
/* Detect an ACL TX stall: no free buffers and no TX activity since
 * acl_last_tx + HCI_ACL_TX_TIMEOUT. Skipped for HCI_RAW devices.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 3053
/* ACL scheduler for packet-based flow control: drain channel queues
 * within each channel's quota while controller ACL buffers remain.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, age priorities for fairness */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3091
/* ACL scheduler for block-based flow control (AMP controllers): quota
 * and buffer accounting are in data blocks rather than packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet does not fit in the remaining blocks */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3145
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
3168
1da177e4 3169/* Schedule SCO */
6039aa73 3170static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3171{
3172 struct hci_conn *conn;
3173 struct sk_buff *skb;
3174 int quote;
3175
3176 BT_DBG("%s", hdev->name);
3177
52087a79
LAD
3178 if (!hci_conn_num(hdev, SCO_LINK))
3179 return;
3180
1da177e4
LT
3181 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3182 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3183 BT_DBG("skb %p len %d", skb, skb->len);
3184 hci_send_frame(skb);
3185
3186 conn->sent++;
3187 if (conn->sent == ~0)
3188 conn->sent = 0;
3189 }
3190 }
3191}
3192
/* Schedule eSCO links; shares the SCO buffer count (sco_cnt) */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3216
/* Schedule LE links. Controllers without dedicated LE buffers
 * (le_pkts == 0) borrow the ACL buffer pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3267
/* TX work item: run every per-link-type scheduler, then flush raw
 * (unknown type) packets straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
3290
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Split the 16-bit field into connection handle and PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol; l2cap takes ownership of skb */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3328
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol; sco takes ownership of skb */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3359
9238f36a
JH
3360static bool hci_req_is_complete(struct hci_dev *hdev)
3361{
3362 struct sk_buff *skb;
3363
3364 skb = skb_peek(&hdev->cmd_q);
3365 if (!skb)
3366 return true;
3367
3368 return bt_cb(skb)->req.start;
3369}
3370
/* Re-queue a clone of the last sent command at the head of the command
 * queue (unless it was HCI_Reset). Used to recover from controllers
 * that drop a pending command after a spontaneous reset.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3392
/* Drive request completion for a finished HCI command.
 *
 * @opcode: opcode of the command that just completed
 * @status: its completion status (0 on success)
 *
 * Determines whether the request this command belonged to is now
 * complete and, if so, invokes the request's complete callback with
 * @status.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request.
	 * The queue lock must be held while walking cmd_q; the first
	 * skb marked req.start belongs to the NEXT request and is put
	 * back at the head.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	/* Invoke the callback outside the queue lock */
	if (req_complete)
		req_complete(hdev, status);
}
3450
/* RX work handler: drain hdev->rx_q and dispatch each received frame.
 * Ordering matters: every frame is first copied to the monitor, then
 * (in promiscuous mode) to raw sockets, before the stack filters and
 * processes it.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the stack does not process frames; they were
		 * already delivered to monitor/sockets above.
		 */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame: hand off to the per-type handler, which
		 * takes ownership of the skb.
		 */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
3505
/* TX work handler for HCI commands: send one queued command if the
 * controller currently has command credit (cmd_cnt > 0; presumably
 * replenished by command-complete/status events — see event handling).
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before keeping a
		 * reference copy of the new one (kfree_skb(NULL) is a no-op).
		 */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			/* Consume one credit and transmit */
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is in flight;
			 * otherwise (re)arm the watchdog timer.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
3537
3538int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3539{
3540 /* General inquiry access code (GIAC) */
3541 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3542 struct hci_cp_inquiry cp;
3543
3544 BT_DBG("%s", hdev->name);
3545
3546 if (test_bit(HCI_INQUIRY, &hdev->flags))
3547 return -EINPROGRESS;
3548
4663262c
JH
3549 inquiry_cache_flush(hdev);
3550
2519a1fc
AG
3551 memset(&cp, 0, sizeof(cp));
3552 memcpy(&cp.lap, lap, sizeof(cp.lap));
3553 cp.length = length;
3554
3555 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3556}
023d5049
AG
3557
3558int hci_cancel_inquiry(struct hci_dev *hdev)
3559{
3560 BT_DBG("%s", hdev->name);
3561
3562 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3563 return -EALREADY;
023d5049
AG
3564
3565 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3566}
31f7956c
AG
3567
3568u8 bdaddr_to_le(u8 bdaddr_type)
3569{
3570 switch (bdaddr_type) {
3571 case BDADDR_LE_PUBLIC:
3572 return ADDR_LE_DEV_PUBLIC;
3573
3574 default:
3575 /* Fallback to LE Random address type */
3576 return ADDR_LE_DEV_RANDOM;
3577 }
3578}