Bluetooth: Add __hci_cmd_sync() helper function
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       void *param, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add(&req, opcode, plen, param);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode);
}
EXPORT_SYMBOL(__hci_cmd_sync);

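/* Usage sketch (added for illustration, not compiled): a driver setup
 * routine could use __hci_cmd_sync() to send one command and block until
 * its Command Complete event arrives. The opcode, timeout and response
 * struct below are real kernel definitions; the wrapper function itself is
 * hypothetical, and it assumes the caller already serializes requests
 * (e.g. during controller setup).
 */
#if 0
static int example_read_bd_addr(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data now points at the command's return parameters,
	 * struct hci_rp_read_bd_addr for this opcode.
	 */
	kfree_skb(skb);
	return 0;
}
#endif
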
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

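/* Pattern sketch (added for illustration, not compiled): callers of
 * hci_req_sync() supply a builder callback that queues commands with
 * hci_req_add(); the whole batch is then run and waited on. The builder
 * below is hypothetical but mirrors hci_scan_req() further down; SCAN_PAGE,
 * HCI_OP_WRITE_SCAN_ENABLE and HCI_INIT_TIMEOUT are real kernel constants.
 */
#if 0
static void example_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int example_enable_page_scan(struct hci_dev *hdev)
{
	return hci_req_sync(hdev, example_scan_req, SCAN_PAGE,
			    HCI_INIT_TIMEOUT);
}
#endif
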
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

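/* Note (added for clarity): the return values above are the Write Inquiry
 * Mode parameter from the core specification: 0x00 = standard inquiry
 * results, 0x01 = inquiry results with RSSI, 0x02 = extended inquiry
 * results. The manufacturer/revision checks appear to whitelist specific
 * controllers known to handle RSSI results despite not advertising the
 * corresponding LMP feature bits.
 */
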
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

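/* Worked example (added for clarity): the event mask is a 64-bit
 * little-endian bitfield, so event bit N lives in events[N / 8] at bit
 * (N % 8). events[4] |= 0x01 therefore sets bit 32 of the mask, and
 * events[7] |= 0x20 sets bit 61, the LE Meta-Event bit.
 */
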
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

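/* Worked example (added for clarity): each advertising-data element written
 * by create_ad() is length/type/value. A controller advertising only the
 * flags element with LE_AD_GENERAL | LE_AD_NO_BREDR produces the three
 * bytes 0x02 0x01 0x06: length 2, type EIR_FLAGS (0x01), value 0x06.
 */
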
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	 * enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

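/* Example (added for clarity): under the rules above, a legacy key
 * (type < 0x03) is always stored persistently, a debug combination key
 * never is, and a changed combination key with no previous key or no
 * dedicated-bonding requirement on either side is treated as temporary.
 */
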
c9839a11 1666struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1667{
c9839a11 1668 struct smp_ltk *k;
75d262c2 1669
c9839a11
VCG
1670 list_for_each_entry(k, &hdev->long_term_keys, list) {
1671 if (k->ediv != ediv ||
a8c5fb1a 1672 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1673 continue;
1674
c9839a11 1675 return k;
75d262c2
VCG
1676 }
1677
1678 return NULL;
1679}
75d262c2 1680
c9839a11 1681struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1682 u8 addr_type)
75d262c2 1683{
c9839a11 1684 struct smp_ltk *k;
75d262c2 1685
c9839a11
VCG
1686 list_for_each_entry(k, &hdev->long_term_keys, list)
1687 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1688 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1689 return k;
1690
1691 return NULL;
1692}
75d262c2 1693
d25e28ab 1694int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1695 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1696{
1697 struct link_key *key, *old_key;
745c0ce3
VA
1698 u8 old_key_type;
1699 bool persistent;
55ed8ca1
JH
1700
1701 old_key = hci_find_link_key(hdev, bdaddr);
1702 if (old_key) {
1703 old_key_type = old_key->type;
1704 key = old_key;
1705 } else {
12adcf3a 1706 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1707 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1708 if (!key)
1709 return -ENOMEM;
1710 list_add(&key->list, &hdev->link_keys);
1711 }
1712
6ed93dc6 1713 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1714
d25e28ab
JH
1715 /* Some buggy controller combinations generate a changed
1716 * combination key for legacy pairing even when there's no
1717 * previous key */
1718 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1719 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1720 type = HCI_LK_COMBINATION;
655fe6ec
JH
1721 if (conn)
1722 conn->key_type = type;
1723 }
d25e28ab 1724
55ed8ca1 1725 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1726 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1727 key->pin_len = pin_len;
1728
b6020ba0 1729 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1730 key->type = old_key_type;
4748fed2
JH
1731 else
1732 key->type = type;
1733
4df378a1
JH
1734 if (!new_key)
1735 return 0;
1736
1737 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1738
744cf19e 1739 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1740
6ec5bcad
VA
1741 if (conn)
1742 conn->flush_key = !persistent;
55ed8ca1
JH
1743
1744 return 0;
1745}
1746
c9839a11 1747int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1748 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1749 ediv, u8 rand[8])
75d262c2 1750{
c9839a11 1751 struct smp_ltk *key, *old_key;
75d262c2 1752
c9839a11
VCG
1753 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1754 return 0;
75d262c2 1755
c9839a11
VCG
1756 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1757 if (old_key)
75d262c2 1758 key = old_key;
c9839a11
VCG
1759 else {
1760 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1761 if (!key)
1762 return -ENOMEM;
c9839a11 1763 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1764 }
1765
75d262c2 1766 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1767 key->bdaddr_type = addr_type;
1768 memcpy(key->val, tk, sizeof(key->val));
1769 key->authenticated = authenticated;
1770 key->ediv = ediv;
1771 key->enc_size = enc_size;
1772 key->type = type;
1773 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1774
c9839a11
VCG
1775 if (!new_key)
1776 return 0;
75d262c2 1777
261cc5aa
VCG
1778 if (type & HCI_SMP_LTK)
1779 mgmt_new_ltk(hdev, key, 1);
1780
75d262c2
VCG
1781 return 0;
1782}
1783
55ed8ca1
JH
1784int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1785{
1786 struct link_key *key;
1787
1788 key = hci_find_link_key(hdev, bdaddr);
1789 if (!key)
1790 return -ENOENT;
1791
6ed93dc6 1792 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1793
1794 list_del(&key->list);
1795 kfree(key);
1796
1797 return 0;
1798}
1799
b899efaf
VCG
1800int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1801{
1802 struct smp_ltk *k, *tmp;
1803
1804 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1805 if (bacmp(bdaddr, &k->bdaddr))
1806 continue;
1807
6ed93dc6 1808 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1809
1810 list_del(&k->list);
1811 kfree(k);
1812 }
1813
1814 return 0;
1815}
1816
6bd32326 1817/* HCI command timer function */
bda4f23a 1818static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1819{
1820 struct hci_dev *hdev = (void *) arg;
1821
bda4f23a
AE
1822 if (hdev->sent_cmd) {
1823 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1824 u16 opcode = __le16_to_cpu(sent->opcode);
1825
1826 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1827 } else {
1828 BT_ERR("%s command tx timeout", hdev->name);
1829 }
1830
6bd32326 1831 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1832 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1833}
1834
2763eda6 1835struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1836 bdaddr_t *bdaddr)
2763eda6
SJ
1837{
1838 struct oob_data *data;
1839
1840 list_for_each_entry(data, &hdev->remote_oob_data, list)
1841 if (bacmp(bdaddr, &data->bdaddr) == 0)
1842 return data;
1843
1844 return NULL;
1845}
1846
1847int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1848{
1849 struct oob_data *data;
1850
1851 data = hci_find_remote_oob_data(hdev, bdaddr);
1852 if (!data)
1853 return -ENOENT;
1854
6ed93dc6 1855 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1856
1857 list_del(&data->list);
1858 kfree(data);
1859
1860 return 0;
1861}
1862
1863int hci_remote_oob_data_clear(struct hci_dev *hdev)
1864{
1865 struct oob_data *data, *n;
1866
1867 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1868 list_del(&data->list);
1869 kfree(data);
1870 }
1871
1872 return 0;
1873}
1874
1875int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1876 u8 *randomizer)
2763eda6
SJ
1877{
1878 struct oob_data *data;
1879
1880 data = hci_find_remote_oob_data(hdev, bdaddr);
1881
1882 if (!data) {
1883 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1884 if (!data)
1885 return -ENOMEM;
1886
1887 bacpy(&data->bdaddr, bdaddr);
1888 list_add(&data->list, &hdev->remote_oob_data);
1889 }
1890
1891 memcpy(data->hash, hash, sizeof(data->hash));
1892 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1893
6ed93dc6 1894 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1895
1896 return 0;
1897}
1898
04124681 1899struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1900{
8035ded4 1901 struct bdaddr_list *b;
b2a66aad 1902
8035ded4 1903 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1904 if (bacmp(bdaddr, &b->bdaddr) == 0)
1905 return b;
b2a66aad
AJ
1906
1907 return NULL;
1908}
1909
1910int hci_blacklist_clear(struct hci_dev *hdev)
1911{
1912 struct list_head *p, *n;
1913
1914 list_for_each_safe(p, n, &hdev->blacklist) {
1915 struct bdaddr_list *b;
1916
1917 b = list_entry(p, struct bdaddr_list, list);
1918
1919 list_del(p);
1920 kfree(b);
1921 }
1922
1923 return 0;
1924}
1925
88c1fe4b 1926int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1927{
1928 struct bdaddr_list *entry;
b2a66aad
AJ
1929
1930 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1931 return -EBADF;
1932
5e762444
AJ
1933 if (hci_blacklist_lookup(hdev, bdaddr))
1934 return -EEXIST;
b2a66aad
AJ
1935
1936 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1937 if (!entry)
1938 return -ENOMEM;
b2a66aad
AJ
1939
1940 bacpy(&entry->bdaddr, bdaddr);
1941
1942 list_add(&entry->list, &hdev->blacklist);
1943
88c1fe4b 1944 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1945}
1946
88c1fe4b 1947int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1948{
1949 struct bdaddr_list *entry;
b2a66aad 1950
1ec918ce 1951 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1952 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1953
1954 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1955 if (!entry)
5e762444 1956 return -ENOENT;
b2a66aad
AJ
1957
1958 list_del(&entry->list);
1959 kfree(entry);
1960
88c1fe4b 1961 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1962}

static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
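
/* Usage sketch (illustrative, not part of the original file): kicking off a
 * ~10 second active LE scan, roughly what the mgmt start_discovery path
 * does. Interval and window are in 0.625 ms units (0x0010 = 10 ms); the
 * timeout is in milliseconds and arms the le_scan_disable delayed work above.
 */
static int __maybe_unused example_start_le_scan(struct hci_dev *hdev)
{
	/* 0x01 = active scanning, 10 ms interval/window, 10.24 s timeout */
	return hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10240);
}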

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
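
/* Driver-side sketch (illustrative, not part of the original file): the
 * minimal alloc/register sequence a transport driver follows. The example_*
 * callbacks and the HCI_UART bus type are assumptions; a real driver
 * (btusb, hci_uart, ...) fills these in with transport specifics.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;	/* bring the transport up */
}

static int example_close(struct hci_dev *hdev)
{
	return 0;	/* shut the transport down */
}

static int example_send(struct sk_buff *skb)
{
	/* A real driver pushes skb to the hardware; hdev is in skb->dev */
	kfree_skb(skb);
	return 0;
}

static int __maybe_unused example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_UART;	/* assumed transport */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}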

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
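
/* Driver-side sketch (illustrative, not part of the original file): handing
 * a complete frame up to the core. The function name is an assumption; the
 * skb->dev and pkt_type conventions mirror what hci_reassembly() sets up.
 */
static int __maybe_unused example_deliver_frame(struct hci_dev *hdev,
						const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);

	skb->dev = (void *) hdev;		/* core finds hdev here */
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	/* driver knows the type */

	return hci_recv_frame(skb);
}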

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
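
/* Driver-side sketch (illustrative, not part of the original file): a
 * UART-style driver feeding raw bytes to the core. The stream variant
 * expects each frame to be prefixed with its H4 packet type byte and copes
 * with arbitrary chunking, so buffers can be passed exactly as they arrive
 * from the line discipline. The function name is an assumption.
 */
static void __maybe_unused example_uart_rx(struct hci_dev *hdev,
					   u8 *buf, int count)
{
	int err;

	/* The core consumes what it can and keeps partial frames in
	 * hdev->reassembly[STREAM_REASSEMBLY] until they complete.
	 */
	err = hci_recv_stream_fragment(hdev, buf, count);
	if (err < 0)
		BT_ERR("%s corrupted stream frame", hdev->name);
}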

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
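
/* Usage sketch (illustrative, not part of the original file): how an upper
 * protocol hooks into HCI connection events, in the style of l2cap_core.c.
 * The exact struct hci_cb fields assumed here (.name, .security_cfm) and
 * all example names are assumptions for the sketch.
 */
static void __maybe_unused example_security_cfm(struct hci_conn *conn,
						__u8 status, __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt %d", conn, status, encrypt);
}

static struct hci_cb example_cb __maybe_unused = {
	.name		= "example_proto",
	.security_cfm	= example_security_cfm,
};

/* hci_register_cb(&example_cb) at module init,
 * hci_unregister_cb(&example_cb) on exit.
 */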

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
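
/* Usage sketch (illustrative, not part of the original file): queueing a
 * single stand-alone command. The call only enqueues; the result arrives
 * asynchronously through the Command Complete/Status event path.
 */
static int __maybe_unused example_enable_page_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE;	/* page scan on, inquiry scan off */

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}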

/* Queue a command to an asynchronous HCI request */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);
}
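
/* Usage sketch (illustrative, not part of the original file): batching
 * several commands into one asynchronous request so a single complete
 * callback runs when the whole sequence finishes. Errors accumulate in
 * req->err and hci_req_run() refuses to submit a broken request. The
 * callback and command choice are assumptions for the example.
 */
static void __maybe_unused example_req_complete(struct hci_dev *hdev,
						u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}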

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
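
/* Usage sketch (illustrative, not part of the original file): starting a
 * BR/EDR inquiry the way the mgmt discovery path does. The length parameter
 * is in units of 1.28 s, so 0x08 gives roughly a 10.24 s inquiry.
 */
static int __maybe_unused example_start_inquiry(struct hci_dev *hdev)
{
	return hci_do_inquiry(hdev, 0x08);	/* ~10.24 s */
}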

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}