Bluetooth: Fix HCI request framework
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward an HCI_DEV_* event for @hdev to the HCI socket layer so that
 * monitoring sockets can see device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
    hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
82/* Execute request and wait for completion. */
01178cd4 83static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
84 void (*func)(struct hci_request *req,
85 unsigned long opt),
01178cd4 86 unsigned long opt, __u32 timeout)
1da177e4 87{
42c6b129 88 struct hci_request req;
1da177e4
LT
89 DECLARE_WAITQUEUE(wait, current);
90 int err = 0;
91
92 BT_DBG("%s start", hdev->name);
93
42c6b129
JH
94 hci_req_init(&req, hdev);
95
1da177e4
LT
96 hdev->req_status = HCI_REQ_PEND;
97
42c6b129 98 func(&req, opt);
53cce22d 99
42c6b129
JH
100 err = hci_req_run(&req, hci_req_sync_complete);
101 if (err < 0) {
53cce22d 102 hdev->req_status = 0;
920c8300
AG
103
104 /* ENODATA means the HCI request command queue is empty.
105 * This can happen when a request with conditionals doesn't
106 * trigger any commands to be sent. This is normal behavior
107 * and should not trigger an error return.
42c6b129 108 */
920c8300
AG
109 if (err == -ENODATA)
110 return 0;
111
112 return err;
53cce22d
JH
113 }
114
bc4445c7
AG
115 add_wait_queue(&hdev->req_wait_q, &wait);
116 set_current_state(TASK_INTERRUPTIBLE);
117
1da177e4
LT
118 schedule_timeout(timeout);
119
120 remove_wait_queue(&hdev->req_wait_q, &wait);
121
122 if (signal_pending(current))
123 return -EINTR;
124
125 switch (hdev->req_status) {
126 case HCI_REQ_DONE:
e175072f 127 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
128 break;
129
130 case HCI_REQ_CANCELED:
131 err = -hdev->req_result;
132 break;
133
134 default:
135 err = -ETIMEDOUT;
136 break;
3ff50b79 137 }
1da177e4 138
a5040efa 139 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
140
141 BT_DBG("%s end: err %d", hdev->name, err);
142
143 return err;
144}
145
01178cd4 146static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
147 void (*req)(struct hci_request *req,
148 unsigned long opt),
01178cd4 149 unsigned long opt, __u32 timeout)
1da177e4
LT
150{
151 int ret;
152
7c6a329e
MH
153 if (!test_bit(HCI_UP, &hdev->flags))
154 return -ENETDOWN;
155
1da177e4
LT
156 /* Serialize all requests */
157 hci_req_lock(hdev);
01178cd4 158 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
159 hci_req_unlock(hdev);
160
161 return ret;
162}
163
42c6b129 164static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 165{
42c6b129 166 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
167
168 /* Reset device */
42c6b129
JH
169 set_bit(HCI_RESET, &req->hdev->flags);
170 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
171}
172
42c6b129 173static void bredr_init(struct hci_request *req)
1da177e4 174{
42c6b129 175 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 176
1da177e4 177 /* Read Local Supported Features */
42c6b129 178 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 179
1143e5a6 180 /* Read Local Version */
42c6b129 181 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
182
183 /* Read BD Address */
42c6b129 184 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
185}
186
42c6b129 187static void amp_init(struct hci_request *req)
e61ef499 188{
42c6b129 189 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 190
e61ef499 191 /* Read Local Version */
42c6b129 192 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
193
194 /* Read Local AMP Info */
42c6b129 195 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
196
197 /* Read Data Blk size */
42c6b129 198 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
199}
200
/* First init stage: flush any driver-provided init commands to the
 * controller as their own request, then queue reset (unless quirked)
 * and the transport-type specific commands onto @req.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
    struct hci_dev *hdev = req->hdev;
    struct hci_request init_req;
    struct sk_buff *skb;

    BT_DBG("%s %ld", hdev->name, opt);

    /* Driver initialization */

    hci_req_init(&init_req, hdev);

    /* Special commands: raw HCI commands queued by the driver are
     * moved into a separate request and sent first.
     */
    while ((skb = skb_dequeue(&hdev->driver_init))) {
        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        /* The first command of a request carries the start marker */
        if (skb_queue_empty(&init_req.cmd_q))
            bt_cb(skb)->req.start = true;

        skb_queue_tail(&init_req.cmd_q, skb);
    }
    skb_queue_purge(&hdev->driver_init);

    /* NOTE(review): the return value of hci_req_run() is ignored, so a
     * failure to queue the driver init commands goes unnoticed here.
     */
    hci_req_run(&init_req, NULL);

    /* Reset */
    if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
        hci_reset_req(req, 0);

    switch (hdev->dev_type) {
    case HCI_BREDR:
        bredr_init(req);
        break;

    case HCI_AMP:
        amp_init(req);
        break;

    default:
        BT_ERR("Unknown device type %d", hdev->dev_type);
        break;
    }
}
245
/* Queue the BR/EDR specific part of the stage-two controller setup. */
static void bredr_setup(struct hci_request *req)
{
    struct hci_cp_delete_stored_link_key cp;
    __le16 param;
    __u8 flt_type;

    /* Read Buffer Size (ACL mtu, max pkt, etc.) */
    hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

    /* Read Class of Device */
    hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

    /* Read Local Name */
    hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

    /* Read Voice Setting */
    hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

    /* Clear Event Filters */
    flt_type = HCI_FLT_CLEAR_ALL;
    hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

    /* Connection accept timeout ~20 secs */
    param = __constant_cpu_to_le16(0x7d00);
    hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

    /* NOTE(review): Delete Stored Link Key is an optional command and is
     * sent unconditionally here; controllers that do not implement it
     * will fail the request -- consider gating this on the supported
     * commands bitmap (as later kernels do).
     */
    bacpy(&cp.bdaddr, BDADDR_ANY);
    cp.delete_all = 0x01;
    hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

    /* Read page scan parameters */
    if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
        hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
        hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
    }
}
282
42c6b129 283static void le_setup(struct hci_request *req)
2177bab5
JH
284{
285 /* Read LE Buffer Size */
42c6b129 286 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
287
288 /* Read LE Local Supported Features */
42c6b129 289 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
290
291 /* Read LE Advertising Channel TX Power */
42c6b129 292 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
293
294 /* Read LE White List Size */
42c6b129 295 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
296
297 /* Read LE Supported States */
42c6b129 298 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
2177bab5
JH
299}
300
301static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
302{
303 if (lmp_ext_inq_capable(hdev))
304 return 0x02;
305
306 if (lmp_inq_rssi_capable(hdev))
307 return 0x01;
308
309 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
310 hdev->lmp_subver == 0x0757)
311 return 0x01;
312
313 if (hdev->manufacturer == 15) {
314 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
315 return 0x01;
316 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
317 return 0x01;
318 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
319 return 0x01;
320 }
321
322 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
323 hdev->lmp_subver == 0x1805)
324 return 0x01;
325
326 return 0x00;
327}
328
42c6b129 329static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
330{
331 u8 mode;
332
42c6b129 333 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 334
42c6b129 335 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
336}
337
/* Program the controller event mask: start from a baseline and enable
 * extra events only when the corresponding LMP feature is supported.
 * LE capable controllers additionally get an LE event mask.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
    struct hci_dev *hdev = req->hdev;

    /* The second byte is 0xff instead of 0x9f (two reserved bits
     * disabled) since a Broadcom 1.2 dongle doesn't respond to the
     * command otherwise.
     */
    u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

    /* CSR 1.1 dongles does not accept any bitfield so don't try to set
     * any event mask for pre 1.2 devices.
     */
    if (hdev->hci_ver < BLUETOOTH_VER_1_2)
        return;

    if (lmp_bredr_capable(hdev)) {
        events[4] |= 0x01; /* Flow Specification Complete */
        events[4] |= 0x02; /* Inquiry Result with RSSI */
        events[4] |= 0x04; /* Read Remote Extended Features Complete */
        events[5] |= 0x08; /* Synchronous Connection Complete */
        events[5] |= 0x10; /* Synchronous Connection Changed */
    }

    if (lmp_inq_rssi_capable(hdev))
        events[4] |= 0x02; /* Inquiry Result with RSSI */

    if (lmp_sniffsubr_capable(hdev))
        events[5] |= 0x20; /* Sniff Subrating */

    if (lmp_pause_enc_capable(hdev))
        events[5] |= 0x80; /* Encryption Key Refresh Complete */

    if (lmp_ext_inq_capable(hdev))
        events[5] |= 0x40; /* Extended Inquiry Result */

    if (lmp_no_flush_capable(hdev))
        events[7] |= 0x01; /* Enhanced Flush Complete */

    if (lmp_lsto_capable(hdev))
        events[6] |= 0x80; /* Link Supervision Timeout Changed */

    if (lmp_ssp_capable(hdev)) {
        events[6] |= 0x01; /* IO Capability Request */
        events[6] |= 0x02; /* IO Capability Response */
        events[6] |= 0x04; /* User Confirmation Request */
        events[6] |= 0x08; /* User Passkey Request */
        events[6] |= 0x10; /* Remote OOB Data Request */
        events[6] |= 0x20; /* Simple Pairing Complete */
        events[7] |= 0x04; /* User Passkey Notification */
        events[7] |= 0x08; /* Keypress Notification */
        events[7] |= 0x10; /* Remote Host Supported
                    * Features Notification
                    */
    }

    if (lmp_le_capable(hdev))
        events[7] |= 0x20; /* LE Meta-Event */

    hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

    /* Reuse the buffer for the separate LE event mask */
    if (lmp_le_capable(hdev)) {
        memset(events, 0, sizeof(events));
        events[0] = 0x1f;
        hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                sizeof(events), events);
    }
}
406
/* Second init stage: common BR/EDR and LE setup that depends on the
 * features learned during stage one.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
    struct hci_dev *hdev = req->hdev;

    if (lmp_bredr_capable(hdev))
        bredr_setup(req);

    if (lmp_le_capable(hdev))
        le_setup(req);

    hci_setup_event_mask(req);

    if (hdev->hci_ver > BLUETOOTH_VER_1_1)
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

    if (lmp_ssp_capable(hdev)) {
        if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
            u8 mode = 0x01;
            hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                    sizeof(mode), &mode);
        } else {
            /* SSP disabled: clear the cached EIR and send an
             * all-zero extended inquiry response.
             */
            struct hci_cp_write_eir cp;

            memset(hdev->eir, 0, sizeof(hdev->eir));
            memset(&cp, 0, sizeof(cp));

            hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
        }
    }

    if (lmp_inq_rssi_capable(hdev))
        hci_setup_inquiry_mode(req);

    if (lmp_inq_tx_pwr_capable(hdev))
        hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

    /* Extended features page 1 (host features) */
    if (lmp_ext_feat_capable(hdev)) {
        struct hci_cp_read_local_ext_features cp;

        cp.page = 0x01;
        hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                sizeof(cp), &cp);
    }

    if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
        u8 enable = 1;
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                &enable);
    }
}
457
42c6b129 458static void hci_setup_link_policy(struct hci_request *req)
2177bab5 459{
42c6b129 460 struct hci_dev *hdev = req->hdev;
2177bab5
JH
461 struct hci_cp_write_def_link_policy cp;
462 u16 link_policy = 0;
463
464 if (lmp_rswitch_capable(hdev))
465 link_policy |= HCI_LP_RSWITCH;
466 if (lmp_hold_capable(hdev))
467 link_policy |= HCI_LP_HOLD;
468 if (lmp_sniff_capable(hdev))
469 link_policy |= HCI_LP_SNIFF;
470 if (lmp_park_capable(hdev))
471 link_policy |= HCI_LP_PARK;
472
473 cp.policy = cpu_to_le16(link_policy);
42c6b129 474 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
475}
476
/* Sync the controller's LE host support setting with HCI_LE_ENABLED. */
static void hci_set_le_support(struct hci_request *req)
{
    struct hci_dev *hdev = req->hdev;
    struct hci_cp_write_le_host_supported cp;

    memset(&cp, 0, sizeof(cp));

    if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
        cp.le = 0x01;
        cp.simul = lmp_le_br_capable(hdev);
    }

    /* Only send the command when the desired setting differs from what
     * the controller currently reports.
     */
    if (cp.le != lmp_host_le_capable(hdev))
        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                &cp);
}
493
42c6b129 494static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 495{
42c6b129
JH
496 struct hci_dev *hdev = req->hdev;
497
2177bab5 498 if (hdev->commands[5] & 0x10)
42c6b129 499 hci_setup_link_policy(req);
2177bab5 500
04b4edcb 501 if (lmp_le_capable(hdev)) {
42c6b129 502 hci_set_le_support(req);
04b4edcb
JH
503 hci_update_ad(req);
504 }
2177bab5
JH
505}
506
/* Run the staged controller initialization; returns 0 or a negative
 * errno from the first failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
    int err;

    err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
    if (err < 0)
        return err;

    /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
     * BR/EDR/LE type controllers. AMP controllers only need the
     * first stage init.
     */
    if (hdev->dev_type != HCI_BREDR)
        return 0;

    err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
    if (err < 0)
        return err;

    return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
528
42c6b129 529static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
530{
531 __u8 scan = opt;
532
42c6b129 533 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
534
535 /* Inquiry and Page scans */
42c6b129 536 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
537}
538
42c6b129 539static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
540{
541 __u8 auth = opt;
542
42c6b129 543 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
544
545 /* Authentication */
42c6b129 546 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
547}
548
42c6b129 549static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
550{
551 __u8 encrypt = opt;
552
42c6b129 553 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 554
e4e8e37c 555 /* Encryption */
42c6b129 556 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
557}
558
42c6b129 559static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
560{
561 __le16 policy = cpu_to_le16(opt);
562
42c6b129 563 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
564
565 /* Default link policy */
42c6b129 566 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
567}
568
8e87d142 569/* Get HCI device by index.
1da177e4
LT
570 * Device is held on return. */
571struct hci_dev *hci_dev_get(int index)
572{
8035ded4 573 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
574
575 BT_DBG("%d", index);
576
577 if (index < 0)
578 return NULL;
579
580 read_lock(&hci_dev_list_lock);
8035ded4 581 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
582 if (d->id == index) {
583 hdev = hci_dev_hold(d);
584 break;
585 }
586 }
587 read_unlock(&hci_dev_list_lock);
588 return hdev;
589}
1da177e4
LT
590
591/* ---- Inquiry support ---- */
ff9ef578 592
30dc78e1
JH
593bool hci_discovery_active(struct hci_dev *hdev)
594{
595 struct discovery_state *discov = &hdev->discovery;
596
6fbe195d 597 switch (discov->state) {
343f935b 598 case DISCOVERY_FINDING:
6fbe195d 599 case DISCOVERY_RESOLVING:
30dc78e1
JH
600 return true;
601
6fbe195d
AG
602 default:
603 return false;
604 }
30dc78e1
JH
605}
606
/* Transition the discovery state machine of @hdev to @state and emit
 * mgmt "discovering" events on the externally visible transitions.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
    BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

    /* No event when the state does not actually change */
    if (hdev->discovery.state == state)
        return;

    switch (state) {
    case DISCOVERY_STOPPED:
        /* STARTING -> STOPPED means discovery never really began,
         * so userspace was never told that it had started.
         */
        if (hdev->discovery.state != DISCOVERY_STARTING)
            mgmt_discovering(hdev, 0);
        break;
    case DISCOVERY_STARTING:
        break;
    case DISCOVERY_FINDING:
        mgmt_discovering(hdev, 1);
        break;
    case DISCOVERY_RESOLVING:
        break;
    case DISCOVERY_STOPPING:
        break;
    }

    hdev->discovery.state = state;
}
632
1da177e4
LT
633static void inquiry_cache_flush(struct hci_dev *hdev)
634{
30883512 635 struct discovery_state *cache = &hdev->discovery;
b57c1a56 636 struct inquiry_entry *p, *n;
1da177e4 637
561aafbc
JH
638 list_for_each_entry_safe(p, n, &cache->all, all) {
639 list_del(&p->all);
b57c1a56 640 kfree(p);
1da177e4 641 }
561aafbc
JH
642
643 INIT_LIST_HEAD(&cache->unknown);
644 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
645}
646
a8c5fb1a
GP
647struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
648 bdaddr_t *bdaddr)
1da177e4 649{
30883512 650 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
651 struct inquiry_entry *e;
652
6ed93dc6 653 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 654
561aafbc
JH
655 list_for_each_entry(e, &cache->all, all) {
656 if (!bacmp(&e->data.bdaddr, bdaddr))
657 return e;
658 }
659
660 return NULL;
661}
662
663struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 664 bdaddr_t *bdaddr)
561aafbc 665{
30883512 666 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
667 struct inquiry_entry *e;
668
6ed93dc6 669 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
670
671 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 672 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
673 return e;
674 }
675
676 return NULL;
1da177e4
LT
677}
678
30dc78e1 679struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
680 bdaddr_t *bdaddr,
681 int state)
30dc78e1
JH
682{
683 struct discovery_state *cache = &hdev->discovery;
684 struct inquiry_entry *e;
685
6ed93dc6 686 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
687
688 list_for_each_entry(e, &cache->resolve, list) {
689 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
690 return e;
691 if (!bacmp(&e->data.bdaddr, bdaddr))
692 return e;
693 }
694
695 return NULL;
696}
697
/* Reposition @ie within the resolve list (e.g. after its RSSI changed)
 * so the list stays ordered for name resolution: entries whose
 * resolution is already in flight (NAME_PENDING) are never displaced,
 * and among the rest @ie is placed before the first entry with an
 * equal-or-larger |rssi| -- i.e. stronger signals are resolved first.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                      struct inquiry_entry *ie)
{
    struct discovery_state *cache = &hdev->discovery;
    struct list_head *pos = &cache->resolve;
    struct inquiry_entry *p;

    list_del(&ie->list);

    list_for_each_entry(p, &cache->resolve, list) {
        if (p->name_state != NAME_PENDING &&
            abs(p->data.rssi) >= abs(ie->data.rssi))
            break;
        pos = &p->list;
    }

    list_add(&ie->list, pos);
}
716
/* Add or refresh the inquiry cache entry for @data->bdaddr.
 *
 * @name_known: caller already knows the remote name, so the entry does
 *      not need to go through name resolution.
 * @ssp: optional out parameter; set to true when the device reports
 *      Simple Pairing support (in @data or in the cached entry).
 *
 * Returns true when the entry needs no further name resolution, false
 * otherwise (including allocation failure).
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                  bool name_known, bool *ssp)
{
    struct discovery_state *cache = &hdev->discovery;
    struct inquiry_entry *ie;

    BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

    /* A fresh inquiry result invalidates any stored OOB data */
    hci_remove_remote_oob_data(hdev, &data->bdaddr);

    if (ssp)
        *ssp = data->ssp_mode;

    ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
    if (ie) {
        if (ie->data.ssp_mode && ssp)
            *ssp = true;

        /* Keep the resolve list ordered when the RSSI changed */
        if (ie->name_state == NAME_NEEDED &&
            data->rssi != ie->data.rssi) {
            ie->data.rssi = data->rssi;
            hci_inquiry_cache_update_resolve(hdev, ie);
        }

        goto update;
    }

    /* Entry not in the cache. Add new one. */
    ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
    if (!ie)
        return false;

    list_add(&ie->all, &cache->all);

    if (name_known) {
        ie->name_state = NAME_KNOWN;
    } else {
        ie->name_state = NAME_NOT_KNOWN;
        list_add(&ie->list, &cache->unknown);
    }

update:
    /* Name just became known: drop the entry from the unknown list
     * (but never disturb a resolution that is already in flight).
     */
    if (name_known && ie->name_state != NAME_KNOWN &&
        ie->name_state != NAME_PENDING) {
        ie->name_state = NAME_KNOWN;
        list_del(&ie->list);
    }

    memcpy(&ie->data, data, sizeof(*data));
    ie->timestamp = jiffies;
    cache->timestamp = jiffies;

    if (ie->name_state == NAME_NOT_KNOWN)
        return false;

    return true;
}
774
775static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
776{
30883512 777 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
778 struct inquiry_info *info = (struct inquiry_info *) buf;
779 struct inquiry_entry *e;
780 int copied = 0;
781
561aafbc 782 list_for_each_entry(e, &cache->all, all) {
1da177e4 783 struct inquiry_data *data = &e->data;
b57c1a56
JH
784
785 if (copied >= num)
786 break;
787
1da177e4
LT
788 bacpy(&info->bdaddr, &data->bdaddr);
789 info->pscan_rep_mode = data->pscan_rep_mode;
790 info->pscan_period_mode = data->pscan_period_mode;
791 info->pscan_mode = data->pscan_mode;
792 memcpy(info->dev_class, data->dev_class, 3);
793 info->clock_offset = data->clock_offset;
b57c1a56 794
1da177e4 795 info++;
b57c1a56 796 copied++;
1da177e4
LT
797 }
798
799 BT_DBG("cache %p, copied %d", cache, copied);
800 return copied;
801}
802
42c6b129 803static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
804{
805 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 806 struct hci_dev *hdev = req->hdev;
1da177e4
LT
807 struct hci_cp_inquiry cp;
808
809 BT_DBG("%s", hdev->name);
810
811 if (test_bit(HCI_INQUIRY, &hdev->flags))
812 return;
813
814 /* Start Inquiry */
815 memcpy(&cp.lap, &ir->lap, 3);
816 cp.length = ir->length;
817 cp.num_rsp = ir->num_rsp;
42c6b129 818 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
819}
820
/* HCIINQUIRY ioctl handler: run an inquiry when the cache is stale,
 * empty, or the caller asked for a flush, then copy the cached results
 * back to user space after the updated request header.
 */
int hci_inquiry(void __user *arg)
{
    __u8 __user *ptr = arg;
    struct hci_inquiry_req ir;
    struct hci_dev *hdev;
    int err = 0, do_inquiry = 0, max_rsp;
    long timeo;
    __u8 *buf;

    if (copy_from_user(&ir, ptr, sizeof(ir)))
        return -EFAULT;

    hdev = hci_dev_get(ir.dev_id);
    if (!hdev)
        return -ENODEV;

    hci_dev_lock(hdev);
    if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
        inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
        inquiry_cache_flush(hdev);
        do_inquiry = 1;
    }
    hci_dev_unlock(hdev);

    /* ~2 seconds of wait per requested inquiry length unit */
    timeo = ir.length * msecs_to_jiffies(2000);

    if (do_inquiry) {
        err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                   timeo);
        if (err < 0)
            goto done;
    }

    /* for unlimited number of responses we will use buffer with
     * 255 entries
     */
    max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

    /* cache_dump can't sleep. Therefore we allocate temp buffer and then
     * copy it to the user space.
     */
    buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
    if (!buf) {
        err = -ENOMEM;
        goto done;
    }

    hci_dev_lock(hdev);
    ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
    hci_dev_unlock(hdev);

    BT_DBG("num_rsp %d", ir.num_rsp);

    if (!copy_to_user(ptr, &ir, sizeof(ir))) {
        ptr += sizeof(ir);
        if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                 ir.num_rsp))
            err = -EFAULT;
    } else
        err = -EFAULT;

    kfree(buf);

done:
    hci_dev_put(hdev);
    return err;
}
888
/* Build LE advertising data into @ptr (caller provides at least
 * HCI_MAX_AD_LENGTH bytes): an optional Flags element, an optional
 * TX power element and the local name, shortened if it does not fit.
 * Returns the number of bytes written.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
    u8 ad_len = 0, flags = 0;
    size_t name_len;

    if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
        flags |= LE_AD_GENERAL;

    if (!lmp_bredr_capable(hdev))
        flags |= LE_AD_NO_BREDR;

    if (lmp_le_br_capable(hdev))
        flags |= LE_AD_SIM_LE_BREDR_CTRL;

    if (lmp_host_le_br_capable(hdev))
        flags |= LE_AD_SIM_LE_BREDR_HOST;

    if (flags) {
        BT_DBG("adv flags 0x%02x", flags);

        /* AD element layout: length, type, value */
        ptr[0] = 2;
        ptr[1] = EIR_FLAGS;
        ptr[2] = flags;

        ad_len += 3;
        ptr += 3;
    }

    if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
        ptr[0] = 2;
        ptr[1] = EIR_TX_POWER;
        ptr[2] = (u8) hdev->adv_tx_power;

        ad_len += 3;
        ptr += 3;
    }

    name_len = strlen(hdev->dev_name);
    if (name_len > 0) {
        /* Reserve 2 bytes for the element's length/type header */
        size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

        if (name_len > max_len) {
            name_len = max_len;
            ptr[1] = EIR_NAME_SHORT;
        } else
            ptr[1] = EIR_NAME_COMPLETE;

        /* Element length covers the type byte plus the name */
        ptr[0] = name_len + 1;

        memcpy(ptr + 2, hdev->dev_name, name_len);

        ad_len += (name_len + 2);
        ptr += (name_len + 2);
    }

    return ad_len;
}
946
/* Queue an LE Set Advertising Data command when the advertising payload
 * derived from the current device state differs from the cached copy.
 */
void hci_update_ad(struct hci_request *req)
{
    struct hci_dev *hdev = req->hdev;
    struct hci_cp_le_set_adv_data cp;
    u8 len;

    if (!lmp_le_capable(hdev))
        return;

    memset(&cp, 0, sizeof(cp));

    len = create_ad(hdev, cp.data);

    /* Nothing to do when the advertising data is unchanged */
    if (hdev->adv_data_len == len &&
        memcmp(cp.data, hdev->adv_data, len) == 0)
        return;

    /* NOTE(review): the cached copy is updated before the controller
     * acknowledges the command, so a failed command leaves the cache
     * out of sync with the controller.
     */
    memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
    hdev->adv_data_len = len;

    cp.length = len;

    hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
971
1da177e4
LT
972/* ---- HCI ioctl helpers ---- */
973
/* Power on HCI device @dev: open the transport, run the staged HCI init
 * sequence (unless the device is raw) and announce the device as up.
 * On init failure all queues and work are torn down again. Returns 0 or
 * a negative errno.
 */
int hci_dev_open(__u16 dev)
{
    struct hci_dev *hdev;
    int ret = 0;

    hdev = hci_dev_get(dev);
    if (!hdev)
        return -ENODEV;

    BT_DBG("%s %p", hdev->name, hdev);

    hci_req_lock(hdev);

    /* Refuse to bring up a device that is being unregistered */
    if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
        ret = -ENODEV;
        goto done;
    }

    if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
        ret = -ERFKILL;
        goto done;
    }

    if (test_bit(HCI_UP, &hdev->flags)) {
        ret = -EALREADY;
        goto done;
    }

    if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
        set_bit(HCI_RAW, &hdev->flags);

    /* Treat all non BR/EDR controllers as raw devices if
       enable_hs is not set */
    if (hdev->dev_type != HCI_BREDR && !enable_hs)
        set_bit(HCI_RAW, &hdev->flags);

    if (hdev->open(hdev)) {
        ret = -EIO;
        goto done;
    }

    /* Raw devices skip the HCI init command sequence */
    if (!test_bit(HCI_RAW, &hdev->flags)) {
        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);
        ret = __hci_init(hdev);
        clear_bit(HCI_INIT, &hdev->flags);
    }

    if (!ret) {
        hci_dev_hold(hdev);
        set_bit(HCI_UP, &hdev->flags);
        hci_notify(hdev, HCI_DEV_UP);
        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
            hci_dev_lock(hdev);
            mgmt_powered(hdev, 1);
            hci_dev_unlock(hdev);
        }
    } else {
        /* Init failed, cleanup */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->cmd_work);
        flush_work(&hdev->rx_work);

        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->rx_q);

        if (hdev->flush)
            hdev->flush(hdev);

        /* Drop the command still awaiting a response, if any */
        if (hdev->sent_cmd) {
            kfree_skb(hdev->sent_cmd);
            hdev->sent_cmd = NULL;
        }

        hdev->close(hdev);
        hdev->flags = 0;
    }

done:
    hci_req_unlock(hdev);
    hci_dev_put(hdev);
    return ret;
}
1058
/* Power down @hdev: cancel pending work, flush queues, optionally send
 * a final HCI reset and close the transport. Counterpart of
 * hci_dev_open(); always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
    BT_DBG("%s %p", hdev->name, hdev);

    cancel_work_sync(&hdev->le_scan);

    cancel_delayed_work(&hdev->power_off);

    /* Abort any synchronous request still waiting for a response */
    hci_req_cancel(hdev, ENODEV);
    hci_req_lock(hdev);

    /* Already down: just make sure the command timer is stopped */
    if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
        del_timer_sync(&hdev->cmd_timer);
        hci_req_unlock(hdev);
        return 0;
    }

    /* Flush RX and TX works */
    flush_work(&hdev->tx_work);
    flush_work(&hdev->rx_work);

    if (hdev->discov_timeout > 0) {
        cancel_delayed_work(&hdev->discov_off);
        hdev->discov_timeout = 0;
        clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
    }

    if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
        cancel_delayed_work(&hdev->service_cache);

    cancel_delayed_work_sync(&hdev->le_scan_disable);

    hci_dev_lock(hdev);
    inquiry_cache_flush(hdev);
    hci_conn_hash_flush(hdev);
    hci_dev_unlock(hdev);

    hci_notify(hdev, HCI_DEV_DOWN);

    if (hdev->flush)
        hdev->flush(hdev);

    /* Reset device */
    skb_queue_purge(&hdev->cmd_q);
    atomic_set(&hdev->cmd_cnt, 1);
    if (!test_bit(HCI_RAW, &hdev->flags) &&
        test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
        set_bit(HCI_INIT, &hdev->flags);
        __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
        clear_bit(HCI_INIT, &hdev->flags);
    }

    /* flush cmd work */
    flush_work(&hdev->cmd_work);

    /* Drop queues */
    skb_queue_purge(&hdev->rx_q);
    skb_queue_purge(&hdev->cmd_q);
    skb_queue_purge(&hdev->raw_q);

    /* Drop last sent command */
    if (hdev->sent_cmd) {
        del_timer_sync(&hdev->cmd_timer);
        kfree_skb(hdev->sent_cmd);
        hdev->sent_cmd = NULL;
    }

    /* After this point our queues are empty
     * and no tasks are scheduled. */
    hdev->close(hdev);

    /* Clear flags */
    hdev->flags = 0;
    hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

    if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
        mgmt_valid_hdev(hdev)) {
        hci_dev_lock(hdev);
        mgmt_powered(hdev, 0);
        hci_dev_unlock(hdev);
    }

    /* Controller radio is available but is currently powered down */
    hdev->amp_status = 0;

    memset(hdev->eir, 0, sizeof(hdev->eir));
    memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

    hci_req_unlock(hdev);

    hci_dev_put(hdev);
    return 0;
}
1152
1153int hci_dev_close(__u16 dev)
1154{
1155 struct hci_dev *hdev;
1156 int err;
1157
70f23020
AE
1158 hdev = hci_dev_get(dev);
1159 if (!hdev)
1da177e4 1160 return -ENODEV;
8ee56540
MH
1161
1162 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1163 cancel_delayed_work(&hdev->power_off);
1164
1da177e4 1165 err = hci_dev_do_close(hdev);
8ee56540 1166
1da177e4
LT
1167 hci_dev_put(hdev);
1168 return err;
1169}
1170
1171int hci_dev_reset(__u16 dev)
1172{
1173 struct hci_dev *hdev;
1174 int ret = 0;
1175
70f23020
AE
1176 hdev = hci_dev_get(dev);
1177 if (!hdev)
1da177e4
LT
1178 return -ENODEV;
1179
1180 hci_req_lock(hdev);
1da177e4
LT
1181
1182 if (!test_bit(HCI_UP, &hdev->flags))
1183 goto done;
1184
1185 /* Drop queues */
1186 skb_queue_purge(&hdev->rx_q);
1187 skb_queue_purge(&hdev->cmd_q);
1188
09fd0de5 1189 hci_dev_lock(hdev);
1da177e4
LT
1190 inquiry_cache_flush(hdev);
1191 hci_conn_hash_flush(hdev);
09fd0de5 1192 hci_dev_unlock(hdev);
1da177e4
LT
1193
1194 if (hdev->flush)
1195 hdev->flush(hdev);
1196
8e87d142 1197 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1198 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
1199
1200 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 1201 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
1202
1203done:
1da177e4
LT
1204 hci_req_unlock(hdev);
1205 hci_dev_put(hdev);
1206 return ret;
1207}
1208
1209int hci_dev_reset_stat(__u16 dev)
1210{
1211 struct hci_dev *hdev;
1212 int ret = 0;
1213
70f23020
AE
1214 hdev = hci_dev_get(dev);
1215 if (!hdev)
1da177e4
LT
1216 return -ENODEV;
1217
1218 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1219
1220 hci_dev_put(hdev);
1221
1222 return ret;
1223}
1224
1225int hci_dev_cmd(unsigned int cmd, void __user *arg)
1226{
1227 struct hci_dev *hdev;
1228 struct hci_dev_req dr;
1229 int err = 0;
1230
1231 if (copy_from_user(&dr, arg, sizeof(dr)))
1232 return -EFAULT;
1233
70f23020
AE
1234 hdev = hci_dev_get(dr.dev_id);
1235 if (!hdev)
1da177e4
LT
1236 return -ENODEV;
1237
1238 switch (cmd) {
1239 case HCISETAUTH:
01178cd4
JH
1240 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1241 HCI_INIT_TIMEOUT);
1da177e4
LT
1242 break;
1243
1244 case HCISETENCRYPT:
1245 if (!lmp_encrypt_capable(hdev)) {
1246 err = -EOPNOTSUPP;
1247 break;
1248 }
1249
1250 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1251 /* Auth must be enabled first */
01178cd4
JH
1252 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1253 HCI_INIT_TIMEOUT);
1da177e4
LT
1254 if (err)
1255 break;
1256 }
1257
01178cd4
JH
1258 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1259 HCI_INIT_TIMEOUT);
1da177e4
LT
1260 break;
1261
1262 case HCISETSCAN:
01178cd4
JH
1263 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1264 HCI_INIT_TIMEOUT);
1da177e4
LT
1265 break;
1266
1da177e4 1267 case HCISETLINKPOL:
01178cd4
JH
1268 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1269 HCI_INIT_TIMEOUT);
1da177e4
LT
1270 break;
1271
1272 case HCISETLINKMODE:
e4e8e37c
MH
1273 hdev->link_mode = ((__u16) dr.dev_opt) &
1274 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1275 break;
1276
1277 case HCISETPTYPE:
1278 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1279 break;
1280
1281 case HCISETACLMTU:
e4e8e37c
MH
1282 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1283 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1284 break;
1285
1286 case HCISETSCOMTU:
e4e8e37c
MH
1287 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1288 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1289 break;
1290
1291 default:
1292 err = -EINVAL;
1293 break;
1294 }
e4e8e37c 1295
1da177e4
LT
1296 hci_dev_put(hdev);
1297 return err;
1298}
1299
1300int hci_get_dev_list(void __user *arg)
1301{
8035ded4 1302 struct hci_dev *hdev;
1da177e4
LT
1303 struct hci_dev_list_req *dl;
1304 struct hci_dev_req *dr;
1da177e4
LT
1305 int n = 0, size, err;
1306 __u16 dev_num;
1307
1308 if (get_user(dev_num, (__u16 __user *) arg))
1309 return -EFAULT;
1310
1311 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1312 return -EINVAL;
1313
1314 size = sizeof(*dl) + dev_num * sizeof(*dr);
1315
70f23020
AE
1316 dl = kzalloc(size, GFP_KERNEL);
1317 if (!dl)
1da177e4
LT
1318 return -ENOMEM;
1319
1320 dr = dl->dev_req;
1321
f20d09d5 1322 read_lock(&hci_dev_list_lock);
8035ded4 1323 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1324 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1325 cancel_delayed_work(&hdev->power_off);
c542a06c 1326
a8b2d5c2
JH
1327 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1328 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1329
1da177e4
LT
1330 (dr + n)->dev_id = hdev->id;
1331 (dr + n)->dev_opt = hdev->flags;
c542a06c 1332
1da177e4
LT
1333 if (++n >= dev_num)
1334 break;
1335 }
f20d09d5 1336 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1337
1338 dl->dev_num = n;
1339 size = sizeof(*dl) + n * sizeof(*dr);
1340
1341 err = copy_to_user(arg, dl, size);
1342 kfree(dl);
1343
1344 return err ? -EFAULT : 0;
1345}
1346
1347int hci_get_dev_info(void __user *arg)
1348{
1349 struct hci_dev *hdev;
1350 struct hci_dev_info di;
1351 int err = 0;
1352
1353 if (copy_from_user(&di, arg, sizeof(di)))
1354 return -EFAULT;
1355
70f23020
AE
1356 hdev = hci_dev_get(di.dev_id);
1357 if (!hdev)
1da177e4
LT
1358 return -ENODEV;
1359
a8b2d5c2 1360 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1361 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1362
a8b2d5c2
JH
1363 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1364 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1365
1da177e4
LT
1366 strcpy(di.name, hdev->name);
1367 di.bdaddr = hdev->bdaddr;
943da25d 1368 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1369 di.flags = hdev->flags;
1370 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1371 if (lmp_bredr_capable(hdev)) {
1372 di.acl_mtu = hdev->acl_mtu;
1373 di.acl_pkts = hdev->acl_pkts;
1374 di.sco_mtu = hdev->sco_mtu;
1375 di.sco_pkts = hdev->sco_pkts;
1376 } else {
1377 di.acl_mtu = hdev->le_mtu;
1378 di.acl_pkts = hdev->le_pkts;
1379 di.sco_mtu = 0;
1380 di.sco_pkts = 0;
1381 }
1da177e4
LT
1382 di.link_policy = hdev->link_policy;
1383 di.link_mode = hdev->link_mode;
1384
1385 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1386 memcpy(&di.features, &hdev->features, sizeof(di.features));
1387
1388 if (copy_to_user(arg, &di, sizeof(di)))
1389 err = -EFAULT;
1390
1391 hci_dev_put(hdev);
1392
1393 return err;
1394}
1395
1396/* ---- Interface to HCI drivers ---- */
1397
611b30f7
MH
1398static int hci_rfkill_set_block(void *data, bool blocked)
1399{
1400 struct hci_dev *hdev = data;
1401
1402 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1403
1404 if (!blocked)
1405 return 0;
1406
1407 hci_dev_do_close(hdev);
1408
1409 return 0;
1410}
1411
1412static const struct rfkill_ops hci_rfkill_ops = {
1413 .set_block = hci_rfkill_set_block,
1414};
1415
ab81cbf9
JH
1416static void hci_power_on(struct work_struct *work)
1417{
1418 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1419
1420 BT_DBG("%s", hdev->name);
1421
1422 if (hci_dev_open(hdev->id) < 0)
1423 return;
1424
a8b2d5c2 1425 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
19202573
JH
1426 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1427 HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1428
a8b2d5c2 1429 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1430 mgmt_index_added(hdev);
ab81cbf9
JH
1431}
1432
1433static void hci_power_off(struct work_struct *work)
1434{
3243553f 1435 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1436 power_off.work);
ab81cbf9
JH
1437
1438 BT_DBG("%s", hdev->name);
1439
8ee56540 1440 hci_dev_do_close(hdev);
ab81cbf9
JH
1441}
1442
16ab91ab
JH
1443static void hci_discov_off(struct work_struct *work)
1444{
1445 struct hci_dev *hdev;
1446 u8 scan = SCAN_PAGE;
1447
1448 hdev = container_of(work, struct hci_dev, discov_off.work);
1449
1450 BT_DBG("%s", hdev->name);
1451
09fd0de5 1452 hci_dev_lock(hdev);
16ab91ab
JH
1453
1454 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1455
1456 hdev->discov_timeout = 0;
1457
09fd0de5 1458 hci_dev_unlock(hdev);
16ab91ab
JH
1459}
1460
2aeb9a1a
JH
1461int hci_uuids_clear(struct hci_dev *hdev)
1462{
4821002c 1463 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1464
4821002c
JH
1465 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1466 list_del(&uuid->list);
2aeb9a1a
JH
1467 kfree(uuid);
1468 }
1469
1470 return 0;
1471}
1472
55ed8ca1
JH
1473int hci_link_keys_clear(struct hci_dev *hdev)
1474{
1475 struct list_head *p, *n;
1476
1477 list_for_each_safe(p, n, &hdev->link_keys) {
1478 struct link_key *key;
1479
1480 key = list_entry(p, struct link_key, list);
1481
1482 list_del(p);
1483 kfree(key);
1484 }
1485
1486 return 0;
1487}
1488
b899efaf
VCG
1489int hci_smp_ltks_clear(struct hci_dev *hdev)
1490{
1491 struct smp_ltk *k, *tmp;
1492
1493 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1494 list_del(&k->list);
1495 kfree(k);
1496 }
1497
1498 return 0;
1499}
1500
55ed8ca1
JH
1501struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1502{
8035ded4 1503 struct link_key *k;
55ed8ca1 1504
8035ded4 1505 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1506 if (bacmp(bdaddr, &k->bdaddr) == 0)
1507 return k;
55ed8ca1
JH
1508
1509 return NULL;
1510}
1511
745c0ce3 1512static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1513 u8 key_type, u8 old_key_type)
d25e28ab
JH
1514{
1515 /* Legacy key */
1516 if (key_type < 0x03)
745c0ce3 1517 return true;
d25e28ab
JH
1518
1519 /* Debug keys are insecure so don't store them persistently */
1520 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1521 return false;
d25e28ab
JH
1522
1523 /* Changed combination key and there's no previous one */
1524 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1525 return false;
d25e28ab
JH
1526
1527 /* Security mode 3 case */
1528 if (!conn)
745c0ce3 1529 return true;
d25e28ab
JH
1530
1531 /* Neither local nor remote side had no-bonding as requirement */
1532 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1533 return true;
d25e28ab
JH
1534
1535 /* Local side had dedicated bonding as requirement */
1536 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1537 return true;
d25e28ab
JH
1538
1539 /* Remote side had dedicated bonding as requirement */
1540 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1541 return true;
d25e28ab
JH
1542
1543 /* If none of the above criteria match, then don't store the key
1544 * persistently */
745c0ce3 1545 return false;
d25e28ab
JH
1546}
1547
c9839a11 1548struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1549{
c9839a11 1550 struct smp_ltk *k;
75d262c2 1551
c9839a11
VCG
1552 list_for_each_entry(k, &hdev->long_term_keys, list) {
1553 if (k->ediv != ediv ||
a8c5fb1a 1554 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1555 continue;
1556
c9839a11 1557 return k;
75d262c2
VCG
1558 }
1559
1560 return NULL;
1561}
75d262c2 1562
c9839a11 1563struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1564 u8 addr_type)
75d262c2 1565{
c9839a11 1566 struct smp_ltk *k;
75d262c2 1567
c9839a11
VCG
1568 list_for_each_entry(k, &hdev->long_term_keys, list)
1569 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1570 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1571 return k;
1572
1573 return NULL;
1574}
75d262c2 1575
d25e28ab 1576int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1577 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1578{
1579 struct link_key *key, *old_key;
745c0ce3
VA
1580 u8 old_key_type;
1581 bool persistent;
55ed8ca1
JH
1582
1583 old_key = hci_find_link_key(hdev, bdaddr);
1584 if (old_key) {
1585 old_key_type = old_key->type;
1586 key = old_key;
1587 } else {
12adcf3a 1588 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1589 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1590 if (!key)
1591 return -ENOMEM;
1592 list_add(&key->list, &hdev->link_keys);
1593 }
1594
6ed93dc6 1595 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1596
d25e28ab
JH
1597 /* Some buggy controller combinations generate a changed
1598 * combination key for legacy pairing even when there's no
1599 * previous key */
1600 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1601 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1602 type = HCI_LK_COMBINATION;
655fe6ec
JH
1603 if (conn)
1604 conn->key_type = type;
1605 }
d25e28ab 1606
55ed8ca1 1607 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1608 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1609 key->pin_len = pin_len;
1610
b6020ba0 1611 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1612 key->type = old_key_type;
4748fed2
JH
1613 else
1614 key->type = type;
1615
4df378a1
JH
1616 if (!new_key)
1617 return 0;
1618
1619 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1620
744cf19e 1621 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1622
6ec5bcad
VA
1623 if (conn)
1624 conn->flush_key = !persistent;
55ed8ca1
JH
1625
1626 return 0;
1627}
1628
c9839a11 1629int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1630 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1631 ediv, u8 rand[8])
75d262c2 1632{
c9839a11 1633 struct smp_ltk *key, *old_key;
75d262c2 1634
c9839a11
VCG
1635 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1636 return 0;
75d262c2 1637
c9839a11
VCG
1638 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1639 if (old_key)
75d262c2 1640 key = old_key;
c9839a11
VCG
1641 else {
1642 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1643 if (!key)
1644 return -ENOMEM;
c9839a11 1645 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1646 }
1647
75d262c2 1648 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1649 key->bdaddr_type = addr_type;
1650 memcpy(key->val, tk, sizeof(key->val));
1651 key->authenticated = authenticated;
1652 key->ediv = ediv;
1653 key->enc_size = enc_size;
1654 key->type = type;
1655 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1656
c9839a11
VCG
1657 if (!new_key)
1658 return 0;
75d262c2 1659
261cc5aa
VCG
1660 if (type & HCI_SMP_LTK)
1661 mgmt_new_ltk(hdev, key, 1);
1662
75d262c2
VCG
1663 return 0;
1664}
1665
55ed8ca1
JH
1666int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1667{
1668 struct link_key *key;
1669
1670 key = hci_find_link_key(hdev, bdaddr);
1671 if (!key)
1672 return -ENOENT;
1673
6ed93dc6 1674 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1675
1676 list_del(&key->list);
1677 kfree(key);
1678
1679 return 0;
1680}
1681
b899efaf
VCG
1682int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1683{
1684 struct smp_ltk *k, *tmp;
1685
1686 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1687 if (bacmp(bdaddr, &k->bdaddr))
1688 continue;
1689
6ed93dc6 1690 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1691
1692 list_del(&k->list);
1693 kfree(k);
1694 }
1695
1696 return 0;
1697}
1698
6bd32326 1699/* HCI command timer function */
bda4f23a 1700static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1701{
1702 struct hci_dev *hdev = (void *) arg;
1703
bda4f23a
AE
1704 if (hdev->sent_cmd) {
1705 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1706 u16 opcode = __le16_to_cpu(sent->opcode);
1707
1708 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1709 } else {
1710 BT_ERR("%s command tx timeout", hdev->name);
1711 }
1712
6bd32326 1713 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1714 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1715}
1716
2763eda6 1717struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1718 bdaddr_t *bdaddr)
2763eda6
SJ
1719{
1720 struct oob_data *data;
1721
1722 list_for_each_entry(data, &hdev->remote_oob_data, list)
1723 if (bacmp(bdaddr, &data->bdaddr) == 0)
1724 return data;
1725
1726 return NULL;
1727}
1728
1729int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1730{
1731 struct oob_data *data;
1732
1733 data = hci_find_remote_oob_data(hdev, bdaddr);
1734 if (!data)
1735 return -ENOENT;
1736
6ed93dc6 1737 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1738
1739 list_del(&data->list);
1740 kfree(data);
1741
1742 return 0;
1743}
1744
1745int hci_remote_oob_data_clear(struct hci_dev *hdev)
1746{
1747 struct oob_data *data, *n;
1748
1749 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1750 list_del(&data->list);
1751 kfree(data);
1752 }
1753
1754 return 0;
1755}
1756
1757int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1758 u8 *randomizer)
2763eda6
SJ
1759{
1760 struct oob_data *data;
1761
1762 data = hci_find_remote_oob_data(hdev, bdaddr);
1763
1764 if (!data) {
1765 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1766 if (!data)
1767 return -ENOMEM;
1768
1769 bacpy(&data->bdaddr, bdaddr);
1770 list_add(&data->list, &hdev->remote_oob_data);
1771 }
1772
1773 memcpy(data->hash, hash, sizeof(data->hash));
1774 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1775
6ed93dc6 1776 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1777
1778 return 0;
1779}
1780
04124681 1781struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1782{
8035ded4 1783 struct bdaddr_list *b;
b2a66aad 1784
8035ded4 1785 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1786 if (bacmp(bdaddr, &b->bdaddr) == 0)
1787 return b;
b2a66aad
AJ
1788
1789 return NULL;
1790}
1791
1792int hci_blacklist_clear(struct hci_dev *hdev)
1793{
1794 struct list_head *p, *n;
1795
1796 list_for_each_safe(p, n, &hdev->blacklist) {
1797 struct bdaddr_list *b;
1798
1799 b = list_entry(p, struct bdaddr_list, list);
1800
1801 list_del(p);
1802 kfree(b);
1803 }
1804
1805 return 0;
1806}
1807
88c1fe4b 1808int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1809{
1810 struct bdaddr_list *entry;
b2a66aad
AJ
1811
1812 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1813 return -EBADF;
1814
5e762444
AJ
1815 if (hci_blacklist_lookup(hdev, bdaddr))
1816 return -EEXIST;
b2a66aad
AJ
1817
1818 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1819 if (!entry)
1820 return -ENOMEM;
b2a66aad
AJ
1821
1822 bacpy(&entry->bdaddr, bdaddr);
1823
1824 list_add(&entry->list, &hdev->blacklist);
1825
88c1fe4b 1826 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1827}
1828
88c1fe4b 1829int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1830{
1831 struct bdaddr_list *entry;
b2a66aad 1832
1ec918ce 1833 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1834 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1835
1836 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1837 if (!entry)
5e762444 1838 return -ENOENT;
b2a66aad
AJ
1839
1840 list_del(&entry->list);
1841 kfree(entry);
1842
88c1fe4b 1843 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1844}
1845
42c6b129 1846static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1847{
1848 struct le_scan_params *param = (struct le_scan_params *) opt;
1849 struct hci_cp_le_set_scan_param cp;
1850
1851 memset(&cp, 0, sizeof(cp));
1852 cp.type = param->type;
1853 cp.interval = cpu_to_le16(param->interval);
1854 cp.window = cpu_to_le16(param->window);
1855
42c6b129 1856 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1857}
1858
42c6b129 1859static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1860{
1861 struct hci_cp_le_set_scan_enable cp;
1862
1863 memset(&cp, 0, sizeof(cp));
1864 cp.enable = 1;
0431a43c 1865 cp.filter_dup = 1;
7ba8b4be 1866
42c6b129 1867 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1868}
1869
1870static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 1871 u16 window, int timeout)
7ba8b4be
AG
1872{
1873 long timeo = msecs_to_jiffies(3000);
1874 struct le_scan_params param;
1875 int err;
1876
1877 BT_DBG("%s", hdev->name);
1878
1879 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1880 return -EINPROGRESS;
1881
1882 param.type = type;
1883 param.interval = interval;
1884 param.window = window;
1885
1886 hci_req_lock(hdev);
1887
01178cd4
JH
1888 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1889 timeo);
7ba8b4be 1890 if (!err)
01178cd4 1891 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
7ba8b4be
AG
1892
1893 hci_req_unlock(hdev);
1894
1895 if (err < 0)
1896 return err;
1897
46818ed5
JH
1898 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1899 msecs_to_jiffies(timeout));
7ba8b4be
AG
1900
1901 return 0;
1902}
1903
7dbfac1d
AG
1904int hci_cancel_le_scan(struct hci_dev *hdev)
1905{
1906 BT_DBG("%s", hdev->name);
1907
1908 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1909 return -EALREADY;
1910
1911 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1912 struct hci_cp_le_set_scan_enable cp;
1913
1914 /* Send HCI command to disable LE Scan */
1915 memset(&cp, 0, sizeof(cp));
1916 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1917 }
1918
1919 return 0;
1920}
1921
7ba8b4be
AG
1922static void le_scan_disable_work(struct work_struct *work)
1923{
1924 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1925 le_scan_disable.work);
7ba8b4be
AG
1926 struct hci_cp_le_set_scan_enable cp;
1927
1928 BT_DBG("%s", hdev->name);
1929
1930 memset(&cp, 0, sizeof(cp));
1931
1932 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1933}
1934
28b75a89
AG
1935static void le_scan_work(struct work_struct *work)
1936{
1937 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1938 struct le_scan_params *param = &hdev->le_scan_params;
1939
1940 BT_DBG("%s", hdev->name);
1941
04124681
GP
1942 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1943 param->timeout);
28b75a89
AG
1944}
1945
1946int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 1947 int timeout)
28b75a89
AG
1948{
1949 struct le_scan_params *param = &hdev->le_scan_params;
1950
1951 BT_DBG("%s", hdev->name);
1952
f1550478
JH
1953 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1954 return -ENOTSUPP;
1955
28b75a89
AG
1956 if (work_busy(&hdev->le_scan))
1957 return -EINPROGRESS;
1958
1959 param->type = type;
1960 param->interval = interval;
1961 param->window = window;
1962 param->timeout = timeout;
1963
1964 queue_work(system_long_wq, &hdev->le_scan);
1965
1966 return 0;
1967}
1968
9be0dab7
DH
1969/* Alloc HCI device */
1970struct hci_dev *hci_alloc_dev(void)
1971{
1972 struct hci_dev *hdev;
1973
1974 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1975 if (!hdev)
1976 return NULL;
1977
b1b813d4
DH
1978 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1979 hdev->esco_type = (ESCO_HV1);
1980 hdev->link_mode = (HCI_LM_ACCEPT);
1981 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
1982 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1983 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 1984
b1b813d4
DH
1985 hdev->sniff_max_interval = 800;
1986 hdev->sniff_min_interval = 80;
1987
1988 mutex_init(&hdev->lock);
1989 mutex_init(&hdev->req_lock);
1990
1991 INIT_LIST_HEAD(&hdev->mgmt_pending);
1992 INIT_LIST_HEAD(&hdev->blacklist);
1993 INIT_LIST_HEAD(&hdev->uuids);
1994 INIT_LIST_HEAD(&hdev->link_keys);
1995 INIT_LIST_HEAD(&hdev->long_term_keys);
1996 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 1997 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
1998
1999 INIT_WORK(&hdev->rx_work, hci_rx_work);
2000 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2001 INIT_WORK(&hdev->tx_work, hci_tx_work);
2002 INIT_WORK(&hdev->power_on, hci_power_on);
2003 INIT_WORK(&hdev->le_scan, le_scan_work);
2004
b1b813d4
DH
2005 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2006 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2007 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2008
9be0dab7 2009 skb_queue_head_init(&hdev->driver_init);
b1b813d4
DH
2010 skb_queue_head_init(&hdev->rx_q);
2011 skb_queue_head_init(&hdev->cmd_q);
2012 skb_queue_head_init(&hdev->raw_q);
2013
2014 init_waitqueue_head(&hdev->req_wait_q);
2015
bda4f23a 2016 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 2017
b1b813d4
DH
2018 hci_init_sysfs(hdev);
2019 discovery_init(hdev);
9be0dab7
DH
2020
2021 return hdev;
2022}
2023EXPORT_SYMBOL(hci_alloc_dev);
2024
2025/* Free HCI device */
2026void hci_free_dev(struct hci_dev *hdev)
2027{
2028 skb_queue_purge(&hdev->driver_init);
2029
2030 /* will free via device release */
2031 put_device(&hdev->dev);
2032}
2033EXPORT_SYMBOL(hci_free_dev);
2034
1da177e4
LT
2035/* Register HCI device */
2036int hci_register_dev(struct hci_dev *hdev)
2037{
b1b813d4 2038 int id, error;
1da177e4 2039
010666a1 2040 if (!hdev->open || !hdev->close)
1da177e4
LT
2041 return -EINVAL;
2042
08add513
MM
2043 /* Do not allow HCI_AMP devices to register at index 0,
2044 * so the index can be used as the AMP controller ID.
2045 */
3df92b31
SL
2046 switch (hdev->dev_type) {
2047 case HCI_BREDR:
2048 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2049 break;
2050 case HCI_AMP:
2051 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2052 break;
2053 default:
2054 return -EINVAL;
1da177e4 2055 }
8e87d142 2056
3df92b31
SL
2057 if (id < 0)
2058 return id;
2059
1da177e4
LT
2060 sprintf(hdev->name, "hci%d", id);
2061 hdev->id = id;
2d8b3a11
AE
2062
2063 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2064
3df92b31
SL
2065 write_lock(&hci_dev_list_lock);
2066 list_add(&hdev->list, &hci_dev_list);
f20d09d5 2067 write_unlock(&hci_dev_list_lock);
1da177e4 2068
32845eb1 2069 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
a8c5fb1a 2070 WQ_MEM_RECLAIM, 1);
33ca954d
DH
2071 if (!hdev->workqueue) {
2072 error = -ENOMEM;
2073 goto err;
2074 }
f48fd9c8 2075
6ead1bbc
JH
2076 hdev->req_workqueue = alloc_workqueue(hdev->name,
2077 WQ_HIGHPRI | WQ_UNBOUND |
2078 WQ_MEM_RECLAIM, 1);
2079 if (!hdev->req_workqueue) {
2080 destroy_workqueue(hdev->workqueue);
2081 error = -ENOMEM;
2082 goto err;
2083 }
2084
33ca954d
DH
2085 error = hci_add_sysfs(hdev);
2086 if (error < 0)
2087 goto err_wqueue;
1da177e4 2088
611b30f7 2089 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2090 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2091 hdev);
611b30f7
MH
2092 if (hdev->rfkill) {
2093 if (rfkill_register(hdev->rfkill) < 0) {
2094 rfkill_destroy(hdev->rfkill);
2095 hdev->rfkill = NULL;
2096 }
2097 }
2098
a8b2d5c2 2099 set_bit(HCI_SETUP, &hdev->dev_flags);
ce2be9ac
AE
2100
2101 if (hdev->dev_type != HCI_AMP)
2102 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2103
1da177e4 2104 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2105 hci_dev_hold(hdev);
1da177e4 2106
19202573 2107 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2108
1da177e4 2109 return id;
f48fd9c8 2110
33ca954d
DH
2111err_wqueue:
2112 destroy_workqueue(hdev->workqueue);
6ead1bbc 2113 destroy_workqueue(hdev->req_workqueue);
33ca954d 2114err:
3df92b31 2115 ida_simple_remove(&hci_index_ida, hdev->id);
f20d09d5 2116 write_lock(&hci_dev_list_lock);
f48fd9c8 2117 list_del(&hdev->list);
f20d09d5 2118 write_unlock(&hci_dev_list_lock);
f48fd9c8 2119
33ca954d 2120 return error;
1da177e4
LT
2121}
2122EXPORT_SYMBOL(hci_register_dev);
2123
2124/* Unregister HCI device */
59735631 2125void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 2126{
3df92b31 2127 int i, id;
ef222013 2128
c13854ce 2129 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 2130
94324962
JH
2131 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2132
3df92b31
SL
2133 id = hdev->id;
2134
f20d09d5 2135 write_lock(&hci_dev_list_lock);
1da177e4 2136 list_del(&hdev->list);
f20d09d5 2137 write_unlock(&hci_dev_list_lock);
1da177e4
LT
2138
2139 hci_dev_do_close(hdev);
2140
cd4c5391 2141 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
2142 kfree_skb(hdev->reassembly[i]);
2143
b9b5ef18
GP
2144 cancel_work_sync(&hdev->power_on);
2145
ab81cbf9 2146 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 2147 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 2148 hci_dev_lock(hdev);
744cf19e 2149 mgmt_index_removed(hdev);
09fd0de5 2150 hci_dev_unlock(hdev);
56e5cb86 2151 }
ab81cbf9 2152
2e58ef3e
JH
2153 /* mgmt_index_removed should take care of emptying the
2154 * pending list */
2155 BUG_ON(!list_empty(&hdev->mgmt_pending));
2156
1da177e4
LT
2157 hci_notify(hdev, HCI_DEV_UNREG);
2158
611b30f7
MH
2159 if (hdev->rfkill) {
2160 rfkill_unregister(hdev->rfkill);
2161 rfkill_destroy(hdev->rfkill);
2162 }
2163
ce242970 2164 hci_del_sysfs(hdev);
147e2d59 2165
f48fd9c8 2166 destroy_workqueue(hdev->workqueue);
6ead1bbc 2167 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 2168
09fd0de5 2169 hci_dev_lock(hdev);
e2e0cacb 2170 hci_blacklist_clear(hdev);
2aeb9a1a 2171 hci_uuids_clear(hdev);
55ed8ca1 2172 hci_link_keys_clear(hdev);
b899efaf 2173 hci_smp_ltks_clear(hdev);
2763eda6 2174 hci_remote_oob_data_clear(hdev);
09fd0de5 2175 hci_dev_unlock(hdev);
e2e0cacb 2176
dc946bd8 2177 hci_dev_put(hdev);
3df92b31
SL
2178
2179 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
2180}
2181EXPORT_SYMBOL(hci_unregister_dev);
2182
2183/* Suspend HCI device */
2184int hci_suspend_dev(struct hci_dev *hdev)
2185{
2186 hci_notify(hdev, HCI_DEV_SUSPEND);
2187 return 0;
2188}
2189EXPORT_SYMBOL(hci_suspend_dev);
2190
2191/* Resume HCI device */
2192int hci_resume_dev(struct hci_dev *hdev)
2193{
2194 hci_notify(hdev, HCI_DEV_RESUME);
2195 return 0;
2196}
2197EXPORT_SYMBOL(hci_resume_dev);
2198
76bca880
MH
2199/* Receive frame from HCI drivers */
2200int hci_recv_frame(struct sk_buff *skb)
2201{
2202 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2203 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2204 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2205 kfree_skb(skb);
2206 return -ENXIO;
2207 }
2208
d82603c6 2209 /* Incoming skb */
76bca880
MH
2210 bt_cb(skb)->incoming = 1;
2211
2212 /* Time stamp */
2213 __net_timestamp(skb);
2214
76bca880 2215 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2216 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2217
76bca880
MH
2218 return 0;
2219}
2220EXPORT_SYMBOL(hci_recv_frame);
2221
/* Reassemble a (possibly partial) HCI packet from a byte stream.
 *
 * @hdev:  device owning the reassembly slots
 * @type:  HCI packet type (ACL/SCO/EVENT)
 * @data:  raw bytes from the driver
 * @count: number of bytes available at @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * Consumes bytes from @data into the slot's skb until a full packet is
 * built, then dispatches it via hci_recv_frame(). Returns the number of
 * unconsumed bytes (>= 0), or a negative errno (-EILSEQ for a bad
 * type/index, -ENOMEM on allocation or tailroom failure).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the maximum frame size
		 * for the type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length out
		 * of it and verify it fits in the allocated skb.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: hand it off and release the slot.
			 * hci_recv_frame() takes ownership of the skb.
			 */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2330
ef222013
MH
2331int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2332{
f39a3c06
SS
2333 int rem = 0;
2334
ef222013
MH
2335 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2336 return -EILSEQ;
2337
da5f6c37 2338 while (count) {
1e429f38 2339 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2340 if (rem < 0)
2341 return rem;
ef222013 2342
f39a3c06
SS
2343 data += (count - rem);
2344 count = rem;
f81c6224 2345 }
ef222013 2346
f39a3c06 2347 return rem;
ef222013
MH
2348}
2349EXPORT_SYMBOL(hci_recv_fragment);
2350
99811510
SS
2351#define STREAM_REASSEMBLY 0
2352
2353int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2354{
2355 int type;
2356 int rem = 0;
2357
da5f6c37 2358 while (count) {
99811510
SS
2359 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2360
2361 if (!skb) {
2362 struct { char type; } *pkt;
2363
2364 /* Start of the frame */
2365 pkt = data;
2366 type = pkt->type;
2367
2368 data++;
2369 count--;
2370 } else
2371 type = bt_cb(skb)->pkt_type;
2372
1e429f38 2373 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2374 STREAM_REASSEMBLY);
99811510
SS
2375 if (rem < 0)
2376 return rem;
2377
2378 data += (count - rem);
2379 count = rem;
f81c6224 2380 }
99811510
SS
2381
2382 return rem;
2383}
2384EXPORT_SYMBOL(hci_recv_stream_fragment);
2385
1da177e4
LT
2386/* ---- Interface to upper protocols ---- */
2387
1da177e4
LT
2388int hci_register_cb(struct hci_cb *cb)
2389{
2390 BT_DBG("%p name %s", cb, cb->name);
2391
f20d09d5 2392 write_lock(&hci_cb_list_lock);
1da177e4 2393 list_add(&cb->list, &hci_cb_list);
f20d09d5 2394 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2395
2396 return 0;
2397}
2398EXPORT_SYMBOL(hci_register_cb);
2399
/* Remove a previously registered callback block from the global list.
 * Protected by hci_cb_list_lock; always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2411
/* Hand one outgoing packet to the transport driver.
 *
 * skb->dev must carry the owning hci_dev. A copy is delivered to the
 * HCI monitor and, when sockets are listening in promiscuous mode, to
 * the HCI sockets, before the skb is passed to hdev->send().
 *
 * Returns the driver's return value, or -ENODEV (skb freed) when no
 * device is attached.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2439
3119ae95
JH
/* Initialize an HCI request: empty command queue, bound to @hdev,
 * no pending build error.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2446
/* Submit a built HCI request for execution.
 *
 * @complete is attached to the last command in the request and is
 * invoked when the whole request finishes. The request's commands are
 * spliced atomically (under cmd_q.lock) onto the device command queue
 * so they run back to back.
 *
 * Returns 0 on success, the stored build error if hci_req_add() failed
 * earlier, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2478
1ca3a9d0
JH
/* Allocate and fill an skb holding one HCI command packet.
 *
 * @opcode: HCI opcode (host byte order; stored little-endian)
 * @plen:   parameter length
 * @param:  parameter bytes, copied when @plen > 0
 *
 * Returns the skb (pkt_type set, skb->dev pointing at @hdev) or NULL
 * on allocation failure. GFP_ATOMIC because callers may hold locks.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2504
/* Send HCI command.
 *
 * Builds a stand-alone command packet and queues it on hdev->cmd_q for
 * hci_cmd_work. Returns 0 on success or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2528
71c76a17 2529/* Queue a command to an asynchronous HCI request */
e348fe6b 2530void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
71c76a17
JH
2531{
2532 struct hci_dev *hdev = req->hdev;
2533 struct sk_buff *skb;
2534
2535 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2536
34739c1e
AG
2537 /* If an error occured during request building, there is no point in
2538 * queueing the HCI command. We can simply return.
2539 */
2540 if (req->err)
2541 return;
2542
71c76a17
JH
2543 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2544 if (!skb) {
5d73e034
AG
2545 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2546 hdev->name, opcode);
2547 req->err = -ENOMEM;
e348fe6b 2548 return;
71c76a17
JH
2549 }
2550
2551 if (skb_queue_empty(&req->cmd_q))
2552 bt_cb(skb)->req.start = true;
2553
2554 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
2555}
2556
1da177e4 2557/* Get data from the previously sent command */
a9de9248 2558void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2559{
2560 struct hci_command_hdr *hdr;
2561
2562 if (!hdev->sent_cmd)
2563 return NULL;
2564
2565 hdr = (void *) hdev->sent_cmd->data;
2566
a9de9248 2567 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2568 return NULL;
2569
f0e09510 2570 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2571
2572 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2573}
2574
/* Send ACL data */

/* Prepend an ACL header (handle+flags, little-endian length) to @skb.
 * @handle and @flags are packed into the 16-bit handle field.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2587
ee22be7e 2588static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2589 struct sk_buff *skb, __u16 flags)
1da177e4 2590{
ee22be7e 2591 struct hci_conn *conn = chan->conn;
1da177e4
LT
2592 struct hci_dev *hdev = conn->hdev;
2593 struct sk_buff *list;
2594
087bfd99
GP
2595 skb->len = skb_headlen(skb);
2596 skb->data_len = 0;
2597
2598 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2599
2600 switch (hdev->dev_type) {
2601 case HCI_BREDR:
2602 hci_add_acl_hdr(skb, conn->handle, flags);
2603 break;
2604 case HCI_AMP:
2605 hci_add_acl_hdr(skb, chan->handle, flags);
2606 break;
2607 default:
2608 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2609 return;
2610 }
087bfd99 2611
70f23020
AE
2612 list = skb_shinfo(skb)->frag_list;
2613 if (!list) {
1da177e4
LT
2614 /* Non fragmented */
2615 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2616
73d80deb 2617 skb_queue_tail(queue, skb);
1da177e4
LT
2618 } else {
2619 /* Fragmented */
2620 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2621
2622 skb_shinfo(skb)->frag_list = NULL;
2623
2624 /* Queue all fragments atomically */
af3e6359 2625 spin_lock(&queue->lock);
1da177e4 2626
73d80deb 2627 __skb_queue_tail(queue, skb);
e702112f
AE
2628
2629 flags &= ~ACL_START;
2630 flags |= ACL_CONT;
1da177e4
LT
2631 do {
2632 skb = list; list = list->next;
8e87d142 2633
1da177e4 2634 skb->dev = (void *) hdev;
0d48d939 2635 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2636 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2637
2638 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2639
73d80deb 2640 __skb_queue_tail(queue, skb);
1da177e4
LT
2641 } while (list);
2642
af3e6359 2643 spin_unlock(&queue->lock);
1da177e4 2644 }
73d80deb
LAD
2645}
2646
/* Queue ACL data on the channel's data queue and kick the TX work
 * item, which schedules it to the controller.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2659
/* Send SCO data.
 *
 * Prepends the SCO header (handle little-endian, one-byte length),
 * queues the packet on the connection's data queue and kicks TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2681
2682/* ---- HCI TX task (outgoing data) ---- */
2683
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of @type with pending data and the fewest
 * outstanding packets (fair scheduling), and compute its send quota.
 *
 * @quote is set to the per-round packet budget (available controller
 * buffers divided by the number of eligible connections, minimum 1),
 * or 0 when nothing is ready. Iterates the connection hash under RCU.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE may share the ACL buffer pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2744
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets (the controller stopped returning
 * completed-packets events). Iterates under RCU.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2765
6039aa73
GP
/* Channel scheduler: pick the best channel of link @type to serve.
 *
 * Selection is by highest head-of-queue skb priority first, then by
 * fewest packets outstanding on the owning connection. @quote gets the
 * send budget (buffer count / eligible channels, minimum 1).
 * Returns NULL when no channel has pending data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness counters */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2847
02b20f0b
LAD
/* Anti-starvation pass after a scheduling round.
 *
 * For every channel of link @type that sent nothing this round but has
 * data queued, promote its head skb to HCI_PRIO_MAX - 1 so it wins the
 * next round. Channels that did send get their round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2897
b71d385a
AE
/* Number of controller data blocks consumed by @skb's ACL payload
 * (header excluded), rounded up to whole blocks.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2903
/* Detect a stalled ACL TX path: if no buffers are free (@cnt == 0) and
 * nothing was sent within HCI_ACL_TX_TIMEOUT, tear down stalled ACL
 * links. Skipped entirely for raw-mode devices.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 2914
6039aa73 2915static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2916{
2917 unsigned int cnt = hdev->acl_cnt;
2918 struct hci_chan *chan;
2919 struct sk_buff *skb;
2920 int quote;
2921
2922 __check_timeout(hdev, cnt);
04837f64 2923
73d80deb 2924 while (hdev->acl_cnt &&
a8c5fb1a 2925 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2926 u32 priority = (skb_peek(&chan->data_q))->priority;
2927 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2928 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2929 skb->len, skb->priority);
73d80deb 2930
ec1cce24
LAD
2931 /* Stop if priority has changed */
2932 if (skb->priority < priority)
2933 break;
2934
2935 skb = skb_dequeue(&chan->data_q);
2936
73d80deb 2937 hci_conn_enter_active_mode(chan->conn,
04124681 2938 bt_cb(skb)->force_active);
04837f64 2939
1da177e4
LT
2940 hci_send_frame(skb);
2941 hdev->acl_last_tx = jiffies;
2942
2943 hdev->acl_cnt--;
73d80deb
LAD
2944 chan->sent++;
2945 chan->conn->sent++;
1da177e4
LT
2946 }
2947 }
02b20f0b
LAD
2948
2949 if (cnt != hdev->acl_cnt)
2950 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2951}
2952
/* Block-based ACL scheduler (data block flow control, used by AMP
 * controllers and BR/EDR in block mode). Like hci_sched_acl_pkt() but
 * accounts in controller blocks (block_cnt) rather than packets; a
 * packet larger than the remaining block budget ends the round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links, BR/EDR controllers ACL */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3006
/* Dispatch ACL scheduling to the packet- or block-based variant
 * depending on the controller's flow control mode. Bails out early
 * when the controller type has no matching links to serve.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
3029
/* Schedule SCO */

/* Drain SCO connection queues while SCO buffers (sco_cnt) remain,
 * using the fair low-sent connection scheduler. conn->sent wraps to 0
 * at ~0 since SCO has no completed-packets accounting.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3053
/* eSCO scheduler: identical to hci_sched_sco() but serves ESCO_LINK
 * connections (shares the SCO buffer pool, sco_cnt).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3077
/* LE scheduler: drain LE channel queues using the channel scheduler.
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) borrow
 * the ACL pool, so the consumed count is written back to whichever
 * counter was used. Includes its own 45 s stall detection.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write back to the pool that was actually consumed */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3128
/* TX work item: run every per-link-type scheduler, then flush any
 * queued raw (unknown type) packets straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
3151
25985edc 3152/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3153
3154/* ACL data packet */
/* ACL data packet */

/* Deliver one incoming ACL packet to L2CAP.
 *
 * Unpacks handle/flags from the ACL header, looks up the connection
 * under the device lock, and passes ownership of @skb to
 * l2cap_recv_acldata(). Packets for unknown handles are logged and
 * dropped.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3189
/* SCO data packet */

/* Deliver one incoming SCO packet to the SCO layer.
 *
 * Looks up the connection by handle under the device lock and passes
 * ownership of @skb to sco_recv_scodata(). Packets for unknown handles
 * are logged and dropped.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3220
9238f36a
JH
/* A request is complete when the command queue is empty or the next
 * queued command is flagged as the start of a new request.
 */
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
3231
42c6b129
JH
/* Re-queue a clone of the last sent command at the head of cmd_q.
 *
 * Used when a controller spontaneously resets during init: the pending
 * command would otherwise never complete. HCI_OP_RESET itself is never
 * resent (the spontaneous reset already covers it).
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3253
9238f36a
JH
/* Handle Command Complete for request tracking.
 *
 * Called from event processing when the command with @opcode finished
 * with @status. If the command belongs to an ongoing request and the
 * request is now finished (last command done, or any command failed),
 * the request's completion callback is invoked and, on failure, the
 * request's remaining queued commands are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Next request starts here: push it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3311
/* Handle Command Status for request tracking.
 *
 * A failure status completes the whole request (delegated to
 * hci_req_cmd_complete()). A success status only matters for the last
 * command of a request that has a completion callback and no further
 * queued commands.
 */
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
3337
/* RX work item: drain hdev->rx_q and dispatch each packet.
 *
 * Every packet is mirrored to the monitor and, in promiscuous mode, to
 * the HCI sockets. Raw-mode devices consume nothing further; during
 * init, data packets are dropped (only events are processed). Remaining
 * packets are routed by type to the event/ACL/SCO handlers, which take
 * ownership of the skb.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
3392
c347b765 3393static void hci_cmd_work(struct work_struct *work)
1da177e4 3394{
c347b765 3395 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3396 struct sk_buff *skb;
3397
2104786b
AE
3398 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3399 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3400
1da177e4 3401 /* Send queued commands */
5a08ecce
AE
3402 if (atomic_read(&hdev->cmd_cnt)) {
3403 skb = skb_dequeue(&hdev->cmd_q);
3404 if (!skb)
3405 return;
3406
7585b97a 3407 kfree_skb(hdev->sent_cmd);
1da177e4 3408
70f23020
AE
3409 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3410 if (hdev->sent_cmd) {
1da177e4
LT
3411 atomic_dec(&hdev->cmd_cnt);
3412 hci_send_frame(skb);
7bdb8a5c
SJ
3413 if (test_bit(HCI_RESET, &hdev->flags))
3414 del_timer(&hdev->cmd_timer);
3415 else
3416 mod_timer(&hdev->cmd_timer,
5f246e89 3417 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3418 } else {
3419 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3420 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3421 }
3422 }
3423}
2519a1fc
AG
3424
3425int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3426{
3427 /* General inquiry access code (GIAC) */
3428 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3429 struct hci_cp_inquiry cp;
3430
3431 BT_DBG("%s", hdev->name);
3432
3433 if (test_bit(HCI_INQUIRY, &hdev->flags))
3434 return -EINPROGRESS;
3435
4663262c
JH
3436 inquiry_cache_flush(hdev);
3437
2519a1fc
AG
3438 memset(&cp, 0, sizeof(cp));
3439 memcpy(&cp.lap, lap, sizeof(cp.lap));
3440 cp.length = length;
3441
3442 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3443}
023d5049
AG
3444
3445int hci_cancel_inquiry(struct hci_dev *hdev)
3446{
3447 BT_DBG("%s", hdev->name);
3448
3449 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3450 return -EALREADY;
023d5049
AG
3451
3452 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3453}
31f7956c
AG
3454
3455u8 bdaddr_to_le(u8 bdaddr_type)
3456{
3457 switch (bdaddr_type) {
3458 case BDADDR_LE_PUBLIC:
3459 return ADDR_LE_DEV_PUBLIC;
3460
3461 default:
3462 /* Fallback to LE Random address type */
3463 return ADDR_LE_DEV_RANDOM;
3464 }
3465}