Bluetooth: Track received events in hdev
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Fan a device state change (up/down/register/...) out to the HCI
 * socket layer so listening sockets receive the event.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
82/* Execute request and wait for completion. */
01178cd4 83static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
84 void (*func)(struct hci_request *req,
85 unsigned long opt),
01178cd4 86 unsigned long opt, __u32 timeout)
1da177e4 87{
42c6b129 88 struct hci_request req;
1da177e4
LT
89 DECLARE_WAITQUEUE(wait, current);
90 int err = 0;
91
92 BT_DBG("%s start", hdev->name);
93
42c6b129
JH
94 hci_req_init(&req, hdev);
95
1da177e4
LT
96 hdev->req_status = HCI_REQ_PEND;
97
42c6b129 98 func(&req, opt);
53cce22d 99
42c6b129
JH
100 err = hci_req_run(&req, hci_req_sync_complete);
101 if (err < 0) {
53cce22d 102 hdev->req_status = 0;
920c8300
AG
103
104 /* ENODATA means the HCI request command queue is empty.
105 * This can happen when a request with conditionals doesn't
106 * trigger any commands to be sent. This is normal behavior
107 * and should not trigger an error return.
42c6b129 108 */
920c8300
AG
109 if (err == -ENODATA)
110 return 0;
111
112 return err;
53cce22d
JH
113 }
114
bc4445c7
AG
115 add_wait_queue(&hdev->req_wait_q, &wait);
116 set_current_state(TASK_INTERRUPTIBLE);
117
1da177e4
LT
118 schedule_timeout(timeout);
119
120 remove_wait_queue(&hdev->req_wait_q, &wait);
121
122 if (signal_pending(current))
123 return -EINTR;
124
125 switch (hdev->req_status) {
126 case HCI_REQ_DONE:
e175072f 127 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
128 break;
129
130 case HCI_REQ_CANCELED:
131 err = -hdev->req_result;
132 break;
133
134 default:
135 err = -ETIMEDOUT;
136 break;
3ff50b79 137 }
1da177e4 138
a5040efa 139 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
140
141 BT_DBG("%s end: err %d", hdev->name, err);
142
143 return err;
144}
145
01178cd4 146static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
147 void (*req)(struct hci_request *req,
148 unsigned long opt),
01178cd4 149 unsigned long opt, __u32 timeout)
1da177e4
LT
150{
151 int ret;
152
7c6a329e
MH
153 if (!test_bit(HCI_UP, &hdev->flags))
154 return -ENETDOWN;
155
1da177e4
LT
156 /* Serialize all requests */
157 hci_req_lock(hdev);
01178cd4 158 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
159 hci_req_unlock(hdev);
160
161 return ret;
162}
163
42c6b129 164static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 165{
42c6b129 166 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
167
168 /* Reset device */
42c6b129
JH
169 set_bit(HCI_RESET, &req->hdev->flags);
170 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
171}
172
42c6b129 173static void bredr_init(struct hci_request *req)
1da177e4 174{
42c6b129 175 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 176
1da177e4 177 /* Read Local Supported Features */
42c6b129 178 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 179
1143e5a6 180 /* Read Local Version */
42c6b129 181 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
182
183 /* Read BD Address */
42c6b129 184 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
185}
186
42c6b129 187static void amp_init(struct hci_request *req)
e61ef499 188{
42c6b129 189 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 190
e61ef499 191 /* Read Local Version */
42c6b129 192 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
193
194 /* Read Local AMP Info */
42c6b129 195 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
196
197 /* Read Data Blk size */
42c6b129 198 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
199}
200
42c6b129 201static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 202{
42c6b129
JH
203 struct hci_dev *hdev = req->hdev;
204 struct hci_request init_req;
e61ef499
AE
205 struct sk_buff *skb;
206
207 BT_DBG("%s %ld", hdev->name, opt);
208
209 /* Driver initialization */
210
42c6b129
JH
211 hci_req_init(&init_req, hdev);
212
e61ef499
AE
213 /* Special commands */
214 while ((skb = skb_dequeue(&hdev->driver_init))) {
215 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
216 skb->dev = (void *) hdev;
217
42c6b129
JH
218 if (skb_queue_empty(&init_req.cmd_q))
219 bt_cb(skb)->req.start = true;
220
221 skb_queue_tail(&init_req.cmd_q, skb);
e61ef499
AE
222 }
223 skb_queue_purge(&hdev->driver_init);
224
42c6b129
JH
225 hci_req_run(&init_req, NULL);
226
11778716
AE
227 /* Reset */
228 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 229 hci_reset_req(req, 0);
11778716 230
e61ef499
AE
231 switch (hdev->dev_type) {
232 case HCI_BREDR:
42c6b129 233 bredr_init(req);
e61ef499
AE
234 break;
235
236 case HCI_AMP:
42c6b129 237 amp_init(req);
e61ef499
AE
238 break;
239
240 default:
241 BT_ERR("Unknown device type %d", hdev->dev_type);
242 break;
243 }
e61ef499
AE
244}
245
42c6b129 246static void bredr_setup(struct hci_request *req)
2177bab5
JH
247{
248 struct hci_cp_delete_stored_link_key cp;
249 __le16 param;
250 __u8 flt_type;
251
252 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 253 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
254
255 /* Read Class of Device */
42c6b129 256 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
257
258 /* Read Local Name */
42c6b129 259 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
260
261 /* Read Voice Setting */
42c6b129 262 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5
JH
263
264 /* Clear Event Filters */
265 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 266 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
267
268 /* Connection accept timeout ~20 secs */
269 param = __constant_cpu_to_le16(0x7d00);
42c6b129 270 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5
JH
271
272 bacpy(&cp.bdaddr, BDADDR_ANY);
273 cp.delete_all = 0x01;
42c6b129 274 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
f332ec66
JH
275
276 /* Read page scan parameters */
277 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
278 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
279 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
280 }
2177bab5
JH
281}
282
42c6b129 283static void le_setup(struct hci_request *req)
2177bab5
JH
284{
285 /* Read LE Buffer Size */
42c6b129 286 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
287
288 /* Read LE Local Supported Features */
42c6b129 289 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
290
291 /* Read LE Advertising Channel TX Power */
42c6b129 292 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
293
294 /* Read LE White List Size */
42c6b129 295 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
296
297 /* Read LE Supported States */
42c6b129 298 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
2177bab5
JH
299}
300
301static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
302{
303 if (lmp_ext_inq_capable(hdev))
304 return 0x02;
305
306 if (lmp_inq_rssi_capable(hdev))
307 return 0x01;
308
309 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
310 hdev->lmp_subver == 0x0757)
311 return 0x01;
312
313 if (hdev->manufacturer == 15) {
314 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
315 return 0x01;
316 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
317 return 0x01;
318 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
319 return 0x01;
320 }
321
322 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
323 hdev->lmp_subver == 0x1805)
324 return 0x01;
325
326 return 0x00;
327}
328
42c6b129 329static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
330{
331 u8 mode;
332
42c6b129 333 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 334
42c6b129 335 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
336}
337
42c6b129 338static void hci_setup_event_mask(struct hci_request *req)
2177bab5 339{
42c6b129
JH
340 struct hci_dev *hdev = req->hdev;
341
2177bab5
JH
342 /* The second byte is 0xff instead of 0x9f (two reserved bits
343 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
344 * command otherwise.
345 */
346 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
347
348 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
349 * any event mask for pre 1.2 devices.
350 */
351 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
352 return;
353
354 if (lmp_bredr_capable(hdev)) {
355 events[4] |= 0x01; /* Flow Specification Complete */
356 events[4] |= 0x02; /* Inquiry Result with RSSI */
357 events[4] |= 0x04; /* Read Remote Extended Features Complete */
358 events[5] |= 0x08; /* Synchronous Connection Complete */
359 events[5] |= 0x10; /* Synchronous Connection Changed */
360 }
361
362 if (lmp_inq_rssi_capable(hdev))
363 events[4] |= 0x02; /* Inquiry Result with RSSI */
364
365 if (lmp_sniffsubr_capable(hdev))
366 events[5] |= 0x20; /* Sniff Subrating */
367
368 if (lmp_pause_enc_capable(hdev))
369 events[5] |= 0x80; /* Encryption Key Refresh Complete */
370
371 if (lmp_ext_inq_capable(hdev))
372 events[5] |= 0x40; /* Extended Inquiry Result */
373
374 if (lmp_no_flush_capable(hdev))
375 events[7] |= 0x01; /* Enhanced Flush Complete */
376
377 if (lmp_lsto_capable(hdev))
378 events[6] |= 0x80; /* Link Supervision Timeout Changed */
379
380 if (lmp_ssp_capable(hdev)) {
381 events[6] |= 0x01; /* IO Capability Request */
382 events[6] |= 0x02; /* IO Capability Response */
383 events[6] |= 0x04; /* User Confirmation Request */
384 events[6] |= 0x08; /* User Passkey Request */
385 events[6] |= 0x10; /* Remote OOB Data Request */
386 events[6] |= 0x20; /* Simple Pairing Complete */
387 events[7] |= 0x04; /* User Passkey Notification */
388 events[7] |= 0x08; /* Keypress Notification */
389 events[7] |= 0x10; /* Remote Host Supported
390 * Features Notification
391 */
392 }
393
394 if (lmp_le_capable(hdev))
395 events[7] |= 0x20; /* LE Meta-Event */
396
42c6b129 397 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
398
399 if (lmp_le_capable(hdev)) {
400 memset(events, 0, sizeof(events));
401 events[0] = 0x1f;
42c6b129
JH
402 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
403 sizeof(events), events);
2177bab5
JH
404 }
405}
406
42c6b129 407static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 408{
42c6b129
JH
409 struct hci_dev *hdev = req->hdev;
410
2177bab5 411 if (lmp_bredr_capable(hdev))
42c6b129 412 bredr_setup(req);
2177bab5
JH
413
414 if (lmp_le_capable(hdev))
42c6b129 415 le_setup(req);
2177bab5 416
42c6b129 417 hci_setup_event_mask(req);
2177bab5
JH
418
419 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 420 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
421
422 if (lmp_ssp_capable(hdev)) {
423 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
424 u8 mode = 0x01;
42c6b129
JH
425 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
426 sizeof(mode), &mode);
2177bab5
JH
427 } else {
428 struct hci_cp_write_eir cp;
429
430 memset(hdev->eir, 0, sizeof(hdev->eir));
431 memset(&cp, 0, sizeof(cp));
432
42c6b129 433 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
434 }
435 }
436
437 if (lmp_inq_rssi_capable(hdev))
42c6b129 438 hci_setup_inquiry_mode(req);
2177bab5
JH
439
440 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 441 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
442
443 if (lmp_ext_feat_capable(hdev)) {
444 struct hci_cp_read_local_ext_features cp;
445
446 cp.page = 0x01;
42c6b129
JH
447 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
448 sizeof(cp), &cp);
2177bab5
JH
449 }
450
451 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
452 u8 enable = 1;
42c6b129
JH
453 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
454 &enable);
2177bab5
JH
455 }
456}
457
42c6b129 458static void hci_setup_link_policy(struct hci_request *req)
2177bab5 459{
42c6b129 460 struct hci_dev *hdev = req->hdev;
2177bab5
JH
461 struct hci_cp_write_def_link_policy cp;
462 u16 link_policy = 0;
463
464 if (lmp_rswitch_capable(hdev))
465 link_policy |= HCI_LP_RSWITCH;
466 if (lmp_hold_capable(hdev))
467 link_policy |= HCI_LP_HOLD;
468 if (lmp_sniff_capable(hdev))
469 link_policy |= HCI_LP_SNIFF;
470 if (lmp_park_capable(hdev))
471 link_policy |= HCI_LP_PARK;
472
473 cp.policy = cpu_to_le16(link_policy);
42c6b129 474 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
475}
476
42c6b129 477static void hci_set_le_support(struct hci_request *req)
2177bab5 478{
42c6b129 479 struct hci_dev *hdev = req->hdev;
2177bab5
JH
480 struct hci_cp_write_le_host_supported cp;
481
482 memset(&cp, 0, sizeof(cp));
483
484 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
485 cp.le = 0x01;
486 cp.simul = lmp_le_br_capable(hdev);
487 }
488
489 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
490 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
491 &cp);
2177bab5
JH
492}
493
42c6b129 494static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 495{
42c6b129
JH
496 struct hci_dev *hdev = req->hdev;
497
2177bab5 498 if (hdev->commands[5] & 0x10)
42c6b129 499 hci_setup_link_policy(req);
2177bab5 500
04b4edcb 501 if (lmp_le_capable(hdev)) {
42c6b129 502 hci_set_le_support(req);
04b4edcb
JH
503 hci_update_ad(req);
504 }
2177bab5
JH
505}
506
507static int __hci_init(struct hci_dev *hdev)
508{
509 int err;
510
511 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
512 if (err < 0)
513 return err;
514
515 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
516 * BR/EDR/LE type controllers. AMP controllers only need the
517 * first stage init.
518 */
519 if (hdev->dev_type != HCI_BREDR)
520 return 0;
521
522 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
523 if (err < 0)
524 return err;
525
526 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
527}
528
42c6b129 529static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
530{
531 __u8 scan = opt;
532
42c6b129 533 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
534
535 /* Inquiry and Page scans */
42c6b129 536 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
537}
538
42c6b129 539static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
540{
541 __u8 auth = opt;
542
42c6b129 543 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
544
545 /* Authentication */
42c6b129 546 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
547}
548
42c6b129 549static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
550{
551 __u8 encrypt = opt;
552
42c6b129 553 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 554
e4e8e37c 555 /* Encryption */
42c6b129 556 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
557}
558
42c6b129 559static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
560{
561 __le16 policy = cpu_to_le16(opt);
562
42c6b129 563 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
564
565 /* Default link policy */
42c6b129 566 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
567}
568
8e87d142 569/* Get HCI device by index.
1da177e4
LT
570 * Device is held on return. */
571struct hci_dev *hci_dev_get(int index)
572{
8035ded4 573 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
574
575 BT_DBG("%d", index);
576
577 if (index < 0)
578 return NULL;
579
580 read_lock(&hci_dev_list_lock);
8035ded4 581 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
582 if (d->id == index) {
583 hdev = hci_dev_hold(d);
584 break;
585 }
586 }
587 read_unlock(&hci_dev_list_lock);
588 return hdev;
589}
1da177e4
LT
590
591/* ---- Inquiry support ---- */
ff9ef578 592
30dc78e1
JH
593bool hci_discovery_active(struct hci_dev *hdev)
594{
595 struct discovery_state *discov = &hdev->discovery;
596
6fbe195d 597 switch (discov->state) {
343f935b 598 case DISCOVERY_FINDING:
6fbe195d 599 case DISCOVERY_RESOLVING:
30dc78e1
JH
600 return true;
601
6fbe195d
AG
602 default:
603 return false;
604 }
30dc78e1
JH
605}
606
ff9ef578
JH
607void hci_discovery_set_state(struct hci_dev *hdev, int state)
608{
609 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
610
611 if (hdev->discovery.state == state)
612 return;
613
614 switch (state) {
615 case DISCOVERY_STOPPED:
7b99b659
AG
616 if (hdev->discovery.state != DISCOVERY_STARTING)
617 mgmt_discovering(hdev, 0);
ff9ef578
JH
618 break;
619 case DISCOVERY_STARTING:
620 break;
343f935b 621 case DISCOVERY_FINDING:
ff9ef578
JH
622 mgmt_discovering(hdev, 1);
623 break;
30dc78e1
JH
624 case DISCOVERY_RESOLVING:
625 break;
ff9ef578
JH
626 case DISCOVERY_STOPPING:
627 break;
628 }
629
630 hdev->discovery.state = state;
631}
632
1da177e4
LT
633static void inquiry_cache_flush(struct hci_dev *hdev)
634{
30883512 635 struct discovery_state *cache = &hdev->discovery;
b57c1a56 636 struct inquiry_entry *p, *n;
1da177e4 637
561aafbc
JH
638 list_for_each_entry_safe(p, n, &cache->all, all) {
639 list_del(&p->all);
b57c1a56 640 kfree(p);
1da177e4 641 }
561aafbc
JH
642
643 INIT_LIST_HEAD(&cache->unknown);
644 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
645}
646
a8c5fb1a
GP
647struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
648 bdaddr_t *bdaddr)
1da177e4 649{
30883512 650 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
651 struct inquiry_entry *e;
652
6ed93dc6 653 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 654
561aafbc
JH
655 list_for_each_entry(e, &cache->all, all) {
656 if (!bacmp(&e->data.bdaddr, bdaddr))
657 return e;
658 }
659
660 return NULL;
661}
662
663struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 664 bdaddr_t *bdaddr)
561aafbc 665{
30883512 666 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
667 struct inquiry_entry *e;
668
6ed93dc6 669 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
670
671 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 672 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
673 return e;
674 }
675
676 return NULL;
1da177e4
LT
677}
678
30dc78e1 679struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
680 bdaddr_t *bdaddr,
681 int state)
30dc78e1
JH
682{
683 struct discovery_state *cache = &hdev->discovery;
684 struct inquiry_entry *e;
685
6ed93dc6 686 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
687
688 list_for_each_entry(e, &cache->resolve, list) {
689 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
690 return e;
691 if (!bacmp(&e->data.bdaddr, bdaddr))
692 return e;
693 }
694
695 return NULL;
696}
697
a3d4e20a 698void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 699 struct inquiry_entry *ie)
a3d4e20a
JH
700{
701 struct discovery_state *cache = &hdev->discovery;
702 struct list_head *pos = &cache->resolve;
703 struct inquiry_entry *p;
704
705 list_del(&ie->list);
706
707 list_for_each_entry(p, &cache->resolve, list) {
708 if (p->name_state != NAME_PENDING &&
a8c5fb1a 709 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
710 break;
711 pos = &p->list;
712 }
713
714 list_add(&ie->list, pos);
715}
716
3175405b 717bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 718 bool name_known, bool *ssp)
1da177e4 719{
30883512 720 struct discovery_state *cache = &hdev->discovery;
70f23020 721 struct inquiry_entry *ie;
1da177e4 722
6ed93dc6 723 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 724
2b2fec4d
SJ
725 hci_remove_remote_oob_data(hdev, &data->bdaddr);
726
388fc8fa
JH
727 if (ssp)
728 *ssp = data->ssp_mode;
729
70f23020 730 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 731 if (ie) {
388fc8fa
JH
732 if (ie->data.ssp_mode && ssp)
733 *ssp = true;
734
a3d4e20a 735 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 736 data->rssi != ie->data.rssi) {
a3d4e20a
JH
737 ie->data.rssi = data->rssi;
738 hci_inquiry_cache_update_resolve(hdev, ie);
739 }
740
561aafbc 741 goto update;
a3d4e20a 742 }
561aafbc
JH
743
744 /* Entry not in the cache. Add new one. */
745 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
746 if (!ie)
3175405b 747 return false;
561aafbc
JH
748
749 list_add(&ie->all, &cache->all);
750
751 if (name_known) {
752 ie->name_state = NAME_KNOWN;
753 } else {
754 ie->name_state = NAME_NOT_KNOWN;
755 list_add(&ie->list, &cache->unknown);
756 }
70f23020 757
561aafbc
JH
758update:
759 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 760 ie->name_state != NAME_PENDING) {
561aafbc
JH
761 ie->name_state = NAME_KNOWN;
762 list_del(&ie->list);
1da177e4
LT
763 }
764
70f23020
AE
765 memcpy(&ie->data, data, sizeof(*data));
766 ie->timestamp = jiffies;
1da177e4 767 cache->timestamp = jiffies;
3175405b
JH
768
769 if (ie->name_state == NAME_NOT_KNOWN)
770 return false;
771
772 return true;
1da177e4
LT
773}
774
775static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
776{
30883512 777 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
778 struct inquiry_info *info = (struct inquiry_info *) buf;
779 struct inquiry_entry *e;
780 int copied = 0;
781
561aafbc 782 list_for_each_entry(e, &cache->all, all) {
1da177e4 783 struct inquiry_data *data = &e->data;
b57c1a56
JH
784
785 if (copied >= num)
786 break;
787
1da177e4
LT
788 bacpy(&info->bdaddr, &data->bdaddr);
789 info->pscan_rep_mode = data->pscan_rep_mode;
790 info->pscan_period_mode = data->pscan_period_mode;
791 info->pscan_mode = data->pscan_mode;
792 memcpy(info->dev_class, data->dev_class, 3);
793 info->clock_offset = data->clock_offset;
b57c1a56 794
1da177e4 795 info++;
b57c1a56 796 copied++;
1da177e4
LT
797 }
798
799 BT_DBG("cache %p, copied %d", cache, copied);
800 return copied;
801}
802
42c6b129 803static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
804{
805 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 806 struct hci_dev *hdev = req->hdev;
1da177e4
LT
807 struct hci_cp_inquiry cp;
808
809 BT_DBG("%s", hdev->name);
810
811 if (test_bit(HCI_INQUIRY, &hdev->flags))
812 return;
813
814 /* Start Inquiry */
815 memcpy(&cp.lap, &ir->lap, 3);
816 cp.length = ir->length;
817 cp.num_rsp = ir->num_rsp;
42c6b129 818 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
819}
820
3e13fa1e
AG
821static int wait_inquiry(void *word)
822{
823 schedule();
824 return signal_pending(current);
825}
826
1da177e4
LT
827int hci_inquiry(void __user *arg)
828{
829 __u8 __user *ptr = arg;
830 struct hci_inquiry_req ir;
831 struct hci_dev *hdev;
832 int err = 0, do_inquiry = 0, max_rsp;
833 long timeo;
834 __u8 *buf;
835
836 if (copy_from_user(&ir, ptr, sizeof(ir)))
837 return -EFAULT;
838
5a08ecce
AE
839 hdev = hci_dev_get(ir.dev_id);
840 if (!hdev)
1da177e4
LT
841 return -ENODEV;
842
09fd0de5 843 hci_dev_lock(hdev);
8e87d142 844 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 845 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
846 inquiry_cache_flush(hdev);
847 do_inquiry = 1;
848 }
09fd0de5 849 hci_dev_unlock(hdev);
1da177e4 850
04837f64 851 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
852
853 if (do_inquiry) {
01178cd4
JH
854 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
855 timeo);
70f23020
AE
856 if (err < 0)
857 goto done;
3e13fa1e
AG
858
859 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
860 * cleared). If it is interrupted by a signal, return -EINTR.
861 */
862 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
863 TASK_INTERRUPTIBLE))
864 return -EINTR;
70f23020 865 }
1da177e4 866
8fc9ced3
GP
867 /* for unlimited number of responses we will use buffer with
868 * 255 entries
869 */
1da177e4
LT
870 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
871
872 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
873 * copy it to the user space.
874 */
01df8c31 875 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 876 if (!buf) {
1da177e4
LT
877 err = -ENOMEM;
878 goto done;
879 }
880
09fd0de5 881 hci_dev_lock(hdev);
1da177e4 882 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 883 hci_dev_unlock(hdev);
1da177e4
LT
884
885 BT_DBG("num_rsp %d", ir.num_rsp);
886
887 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
888 ptr += sizeof(ir);
889 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 890 ir.num_rsp))
1da177e4 891 err = -EFAULT;
8e87d142 892 } else
1da177e4
LT
893 err = -EFAULT;
894
895 kfree(buf);
896
897done:
898 hci_dev_put(hdev);
899 return err;
900}
901
3f0f524b
JH
902static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
903{
904 u8 ad_len = 0, flags = 0;
905 size_t name_len;
906
907 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
908 flags |= LE_AD_GENERAL;
909
910 if (!lmp_bredr_capable(hdev))
911 flags |= LE_AD_NO_BREDR;
912
913 if (lmp_le_br_capable(hdev))
914 flags |= LE_AD_SIM_LE_BREDR_CTRL;
915
916 if (lmp_host_le_br_capable(hdev))
917 flags |= LE_AD_SIM_LE_BREDR_HOST;
918
919 if (flags) {
920 BT_DBG("adv flags 0x%02x", flags);
921
922 ptr[0] = 2;
923 ptr[1] = EIR_FLAGS;
924 ptr[2] = flags;
925
926 ad_len += 3;
927 ptr += 3;
928 }
929
930 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
931 ptr[0] = 2;
932 ptr[1] = EIR_TX_POWER;
933 ptr[2] = (u8) hdev->adv_tx_power;
934
935 ad_len += 3;
936 ptr += 3;
937 }
938
939 name_len = strlen(hdev->dev_name);
940 if (name_len > 0) {
941 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
942
943 if (name_len > max_len) {
944 name_len = max_len;
945 ptr[1] = EIR_NAME_SHORT;
946 } else
947 ptr[1] = EIR_NAME_COMPLETE;
948
949 ptr[0] = name_len + 1;
950
951 memcpy(ptr + 2, hdev->dev_name, name_len);
952
953 ad_len += (name_len + 2);
954 ptr += (name_len + 2);
955 }
956
957 return ad_len;
958}
959
04b4edcb 960void hci_update_ad(struct hci_request *req)
3f0f524b 961{
04b4edcb 962 struct hci_dev *hdev = req->hdev;
3f0f524b
JH
963 struct hci_cp_le_set_adv_data cp;
964 u8 len;
3f0f524b 965
04b4edcb
JH
966 if (!lmp_le_capable(hdev))
967 return;
3f0f524b
JH
968
969 memset(&cp, 0, sizeof(cp));
970
971 len = create_ad(hdev, cp.data);
972
973 if (hdev->adv_data_len == len &&
04b4edcb
JH
974 memcmp(cp.data, hdev->adv_data, len) == 0)
975 return;
3f0f524b
JH
976
977 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
978 hdev->adv_data_len = len;
979
980 cp.length = len;
3f0f524b 981
04b4edcb 982 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
3f0f524b
JH
983}
984
1da177e4
LT
985/* ---- HCI ioctl helpers ---- */
986
987int hci_dev_open(__u16 dev)
988{
989 struct hci_dev *hdev;
990 int ret = 0;
991
5a08ecce
AE
992 hdev = hci_dev_get(dev);
993 if (!hdev)
1da177e4
LT
994 return -ENODEV;
995
996 BT_DBG("%s %p", hdev->name, hdev);
997
998 hci_req_lock(hdev);
999
94324962
JH
1000 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1001 ret = -ENODEV;
1002 goto done;
1003 }
1004
611b30f7
MH
1005 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1006 ret = -ERFKILL;
1007 goto done;
1008 }
1009
1da177e4
LT
1010 if (test_bit(HCI_UP, &hdev->flags)) {
1011 ret = -EALREADY;
1012 goto done;
1013 }
1014
1015 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1016 set_bit(HCI_RAW, &hdev->flags);
1017
07e3b94a
AE
1018 /* Treat all non BR/EDR controllers as raw devices if
1019 enable_hs is not set */
1020 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
1021 set_bit(HCI_RAW, &hdev->flags);
1022
1da177e4
LT
1023 if (hdev->open(hdev)) {
1024 ret = -EIO;
1025 goto done;
1026 }
1027
1028 if (!test_bit(HCI_RAW, &hdev->flags)) {
1029 atomic_set(&hdev->cmd_cnt, 1);
1030 set_bit(HCI_INIT, &hdev->flags);
2177bab5 1031 ret = __hci_init(hdev);
1da177e4
LT
1032 clear_bit(HCI_INIT, &hdev->flags);
1033 }
1034
1035 if (!ret) {
1036 hci_dev_hold(hdev);
1037 set_bit(HCI_UP, &hdev->flags);
1038 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a
AE
1039 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1040 mgmt_valid_hdev(hdev)) {
09fd0de5 1041 hci_dev_lock(hdev);
744cf19e 1042 mgmt_powered(hdev, 1);
09fd0de5 1043 hci_dev_unlock(hdev);
56e5cb86 1044 }
8e87d142 1045 } else {
1da177e4 1046 /* Init failed, cleanup */
3eff45ea 1047 flush_work(&hdev->tx_work);
c347b765 1048 flush_work(&hdev->cmd_work);
b78752cc 1049 flush_work(&hdev->rx_work);
1da177e4
LT
1050
1051 skb_queue_purge(&hdev->cmd_q);
1052 skb_queue_purge(&hdev->rx_q);
1053
1054 if (hdev->flush)
1055 hdev->flush(hdev);
1056
1057 if (hdev->sent_cmd) {
1058 kfree_skb(hdev->sent_cmd);
1059 hdev->sent_cmd = NULL;
1060 }
1061
1062 hdev->close(hdev);
1063 hdev->flags = 0;
1064 }
1065
1066done:
1067 hci_req_unlock(hdev);
1068 hci_dev_put(hdev);
1069 return ret;
1070}
1071
/* Bring an HCI controller down: cancel scheduled work, flush RX/TX,
 * drain all queues, optionally reset the hardware and tell mgmt the
 * device is powered off.  Serialized against requests via req_lock.
 * Always returns 0; if the device is not HCI_UP only the command
 * timer is stopped.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Device already down: just make sure no command timeout
		 * can fire afterwards. */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1168
/* ioctl entry point (HCIDEVDOWN): close the device identified by @dev.
 * Clears a pending auto-power-off before doing the real close so the
 * delayed work does not race with us.  Returns 0 or a negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
1186
/* ioctl entry point (HCIDEVRESET): soft-reset a running device.
 * Drops pending packets, flushes caches/connections, then (unless the
 * device is in raw mode) issues an HCI Reset synchronously.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to do if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing and clear per-type flow control */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1224
1225int hci_dev_reset_stat(__u16 dev)
1226{
1227 struct hci_dev *hdev;
1228 int ret = 0;
1229
70f23020
AE
1230 hdev = hci_dev_get(dev);
1231 if (!hdev)
1da177e4
LT
1232 return -ENODEV;
1233
1234 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1235
1236 hci_dev_put(hdev);
1237
1238 return ret;
1239}
1240
/* Dispatcher for the HCISET* device-control ioctls.  Copies the
 * request from user space, looks up the target device and either runs
 * a synchronous HCI request or updates hdev fields directly.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits and the packet
		 * count in the low 16 bits (historical ABI layout). */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1315
/* ioctl entry point (HCIGETDEVLIST): copy up to dev_num (id, flags)
 * pairs for registered controllers back to user space.  Also clears a
 * pending auto-off and marks non-mgmt devices pairable, mirroring
 * hci_get_dev_info.  Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation by what fits into two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1362
/* ioctl entry point (HCIGETDEVINFO): fill a struct hci_dev_info for
 * one controller.  For LE-only controllers the ACL fields report the
 * LE buffer settings and the SCO fields are zeroed.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	/* hdev->name is always a short NUL-terminated "hci%d" string,
	 * so the unbounded copy cannot overflow di.name. */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1411
1412/* ---- Interface to HCI drivers ---- */
1413
611b30f7
MH
/* rfkill callback: power the controller down when the radio is
 * blocked.  Unblocking is a no-op here; user space powers it back up.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}
1427
/* rfkill operations registered for each controller in
 * hci_register_dev(). */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1431
ab81cbf9
JH
/* Deferred power-on work.  Opens the device; if auto-off is armed,
 * schedules the power-off work, and announces a newly set-up
 * controller to mgmt.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1448
/* Deferred power-off work: simply closes the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1458
16ab91ab
JH
/* Delayed work that ends a limited discoverable period: turn off
 * inquiry scan (page scan stays on) and clear the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1476
2aeb9a1a
JH
/* Free every registered service UUID on @hdev.  Always returns 0. */
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}
1488
55ed8ca1
JH
1489int hci_link_keys_clear(struct hci_dev *hdev)
1490{
1491 struct list_head *p, *n;
1492
1493 list_for_each_safe(p, n, &hdev->link_keys) {
1494 struct link_key *key;
1495
1496 key = list_entry(p, struct link_key, list);
1497
1498 list_del(p);
1499 kfree(key);
1500 }
1501
1502 return 0;
1503}
1504
b899efaf
VCG
/* Free every stored SMP long term key on @hdev.  Always returns 0. */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1516
55ed8ca1
JH
1517struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1518{
8035ded4 1519 struct link_key *k;
55ed8ca1 1520
8035ded4 1521 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1522 if (bacmp(bdaddr, &k->bdaddr) == 0)
1523 return k;
55ed8ca1
JH
1524
1525 return NULL;
1526}
1527
/* Decide whether a newly created link key should be stored
 * persistently.  The checks are ordered from strongest to weakest
 * evidence, so their sequence matters.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1563
/* Look up a stored LTK by its EDiv/Rand pair (the values the remote
 * sends in an LE Long Term Key Request).  Returns the key or NULL.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}
75d262c2 1578
c9839a11 1579struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1580 u8 addr_type)
75d262c2 1581{
c9839a11 1582 struct smp_ltk *k;
75d262c2 1583
c9839a11
VCG
1584 list_for_each_entry(k, &hdev->long_term_keys, list)
1585 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1586 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1587 return k;
1588
1589 return NULL;
1590}
75d262c2 1591
/* Store (or update) the BR/EDR link key for @bdaddr and, when
 * @new_key is set, notify mgmt.  Whether the key survives reboot is
 * decided by hci_persistent_key(); non-persistent keys are flagged
 * for flushing on the connection.  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1644
/* Store (or update) an SMP key for @bdaddr/@addr_type.  Only STKs and
 * LTKs are accepted; other types are silently ignored.  When
 * @new_key is set, a new LTK (not an STK) is reported to mgmt.
 * Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1681
55ed8ca1
JH
/* Delete the stored BR/EDR link key for @bdaddr.
 * Returns 0 on success, -ENOENT if no key is stored.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}
1697
b899efaf
VCG
/* Delete every stored SMP LTK for @bdaddr (all address types).
 * Always returns 0, even if nothing matched.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1714
/* HCI command timer function: fires when the controller failed to
 * answer the last command in time.  Logs the stuck opcode and
 * restarts command processing so the queue does not stall forever.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1732
2763eda6 1733struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1734 bdaddr_t *bdaddr)
2763eda6
SJ
1735{
1736 struct oob_data *data;
1737
1738 list_for_each_entry(data, &hdev->remote_oob_data, list)
1739 if (bacmp(bdaddr, &data->bdaddr) == 0)
1740 return data;
1741
1742 return NULL;
1743}
1744
/* Delete the stored OOB pairing data for @bdaddr.
 * Returns 0 on success, -ENOENT if nothing is stored.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}
1760
/* Free every stored OOB pairing record on @hdev.  Always returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1772
/* Store (or update) the OOB hash/randomizer pair received for
 * @bdaddr.  Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc is fine: every field is assigned below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1796
04124681 1797struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1798{
8035ded4 1799 struct bdaddr_list *b;
b2a66aad 1800
8035ded4 1801 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1802 if (bacmp(bdaddr, &b->bdaddr) == 0)
1803 return b;
b2a66aad
AJ
1804
1805 return NULL;
1806}
1807
1808int hci_blacklist_clear(struct hci_dev *hdev)
1809{
1810 struct list_head *p, *n;
1811
1812 list_for_each_safe(p, n, &hdev->blacklist) {
1813 struct bdaddr_list *b;
1814
1815 b = list_entry(p, struct bdaddr_list, list);
1816
1817 list_del(p);
1818 kfree(b);
1819 }
1820
1821 return 0;
1822}
1823
/* Add @bdaddr to the device blacklist and notify mgmt.
 * Returns the mgmt notification result, -EBADF for BDADDR_ANY,
 * -EEXIST if already blacklisted, or -ENOMEM.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1844
/* Remove @bdaddr from the blacklist and notify mgmt.  BDADDR_ANY is
 * treated as "clear the whole list".  Returns the mgmt notification
 * result, 0 for a full clear, or -ENOENT if the entry was not found.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1861
/* Request builder: queue an LE Set Scan Parameters command with the
 * type/interval/window packed into @opt (a struct le_scan_params *).
 */
static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1874
42c6b129 1875static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1876{
1877 struct hci_cp_le_set_scan_enable cp;
1878
1879 memset(&cp, 0, sizeof(cp));
1880 cp.enable = 1;
0431a43c 1881 cp.filter_dup = 1;
7ba8b4be 1882
42c6b129 1883 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1884}
1885
/* Start an LE scan synchronously: set parameters, enable scanning and
 * arm the delayed work that will stop it after @timeout ms.  Returns
 * 0, -EINPROGRESS if a scan is already running, or the request error.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Schedule automatic scan stop */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
1919
7dbfac1d
AG
/* Abort a running LE scan.  If the delayed disable work had not fired
 * yet, cancel it and send the disable command ourselves.  Returns 0
 * or -EALREADY when no scan is active.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1937
7ba8b4be
AG
/* Delayed work that stops an LE scan: a zeroed Set Scan Enable
 * command means "scanning off".
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1950
28b75a89
AG
/* Workqueue wrapper that runs the scan requested via hci_le_scan()
 * with the parameters stashed in hdev->le_scan_params.
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1961
/* Kick off an asynchronous LE scan.  The parameters are stored on
 * @hdev and the actual work runs from system_long_wq.  Returns 0,
 * -ENOTSUPP in peripheral role, or -EINPROGRESS if already queued.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1984
9be0dab7
DH
/* Alloc HCI device: allocate and initialise a struct hci_dev with
 * default packet types, all bookkeeping lists, work items and the
 * command timer.  Returns the new device or NULL on allocation
 * failure.  The caller registers it with hci_register_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2040
/* Free HCI device allocated with hci_alloc_dev().  The struct itself
 * is released by the device core when the embedded device's refcount
 * drops to zero.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2050
1da177e4
LT
/* Register HCI device: assign an index, create the work queues and
 * sysfs/rfkill entries, and queue the initial power-on.  Returns the
 * new device id (>= 0) or a negative errno; on failure everything set
 * up so far is torn down again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is non-fatal: the device just has no kill
	 * switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2139
/* Unregister HCI device: the inverse of hci_register_dev().  Marks the
 * device as going away, powers it down, tears down mgmt/rfkill/sysfs
 * state, frees all stored keys and OOB data, and finally releases the
 * index.  Teardown order matters: power-on work must be cancelled
 * before the work queues are destroyed.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2198
/* Suspend HCI device: notify registered listeners; no state change. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2206
/* Resume HCI device: notify registered listeners; no state change. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2214
76bca880
MH
2215/* Receive frame from HCI drivers */
2216int hci_recv_frame(struct sk_buff *skb)
2217{
2218 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2219 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2220 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2221 kfree_skb(skb);
2222 return -ENXIO;
2223 }
2224
d82603c6 2225 /* Incoming skb */
76bca880
MH
2226 bt_cb(skb)->incoming = 1;
2227
2228 /* Time stamp */
2229 __net_timestamp(skb);
2230
76bca880 2231 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2232 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2233
76bca880
MH
2234 return 0;
2235}
2236EXPORT_SYMBOL(hci_recv_frame);
2237
/* Incrementally reassemble one HCI packet from a raw byte stream.
 *
 * @hdev:  device owning the per-slot reassembly buffers
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  incoming bytes
 * @count: number of bytes available at @data
 * @index: slot in hdev->reassembly[] used as reassembly state
 *
 * Copies bytes into the pending skb until the header, and then the
 * header-announced payload length, are complete.  A finished frame is
 * handed to hci_recv_frame() and the slot is reset.
 *
 * Returns the number of unconsumed bytes (>= 0), or a negative errno on
 * invalid type/index (-EILSEQ) or allocation failure (-ENOMEM).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate an skb large enough for the
		 * maximum frame of this type and expect its header first.
		 * The type range was validated above, so the switch covers
		 * every possible value. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed-size header is complete, read the payload
		 * length from it and extend the expectation accordingly. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2346
ef222013
MH
2347int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2348{
f39a3c06
SS
2349 int rem = 0;
2350
ef222013
MH
2351 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2352 return -EILSEQ;
2353
da5f6c37 2354 while (count) {
1e429f38 2355 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2356 if (rem < 0)
2357 return rem;
ef222013 2358
f39a3c06
SS
2359 data += (count - rem);
2360 count = rem;
f81c6224 2361 }
ef222013 2362
f39a3c06 2363 return rem;
ef222013
MH
2364}
2365EXPORT_SYMBOL(hci_recv_fragment);
2366
/* Dedicated reassembly slot for untyped byte streams (e.g. UART drivers) */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes into reassembly.  Unlike hci_recv_fragment(), the
 * packet type is not known up front: when no packet is in progress, the
 * first byte of the stream is consumed as the H:4 packet-type indicator.
 *
 * Returns the number of bytes left over (>= 0) or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* NOTE: arithmetic on void* (data++) is a GCC
			 * extension treating it as a byte pointer */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2401
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure.  Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2415
/* Remove a previously registered upper-protocol callback structure. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2427
/* Hand one outgoing frame to the driver.
 *
 * The skb carries its hci_dev in skb->dev.  A copy is delivered to the
 * monitor interface and, when a socket is in promiscuous mode, to the
 * HCI sockets, before the driver's send callback is invoked.
 *
 * Returns the driver's send result, or -ENODEV if no device is attached.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2455
3119ae95
JH
2456void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2457{
2458 skb_queue_head_init(&req->cmd_q);
2459 req->hdev = hdev;
5d73e034 2460 req->err = 0;
3119ae95
JH
2461}
2462
/* Submit a built request for execution.
 *
 * The request's queued commands are spliced onto the device command queue
 * and the command worker is kicked.  @complete is recorded on the last
 * command's control block so hci_req_cmd_complete() can invoke it when the
 * whole request finishes.
 *
 * Returns 0 on success, the building error if one occurred, or -ENODATA
 * for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically: the command queue is also drained from the
	 * command worker, so take its lock with IRQs disabled. */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2494
/* Allocate and fill an skb holding one HCI command packet.
 *
 * @opcode: HCI command opcode (stored little-endian on the wire)
 * @plen:   parameter length in bytes
 * @param:  parameter bytes copied after the header (may be NULL if plen 0)
 *
 * Returns the ready-to-queue skb, or NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2520
/* Send HCI command */
/* Build a stand-alone HCI command and queue it on the device command
 * queue.  Returns 0 on success or -ENOMEM if the skb allocation fails. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2543}
1da177e4 2544
/* Queue a command to an asynchronous HCI request */
/* On allocation failure the error is latched in req->err; hci_req_run()
 * will then purge the request instead of submitting it. */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request boundary for
	 * hci_req_is_complete() */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);
}
2572
/* Get data from the previously sent command */
/* Returns a pointer to the parameter bytes of the last sent command if its
 * opcode matches @opcode, otherwise NULL. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Opcode is stored little-endian in the header */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2590
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to an outgoing skb.
 * The header is written in front of the current payload via skb_push(). */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* handle field packs the 12-bit handle with the PB/BC flags */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2603
/* Add ACL headers and queue an outgoing ACL skb (and all of its fragments)
 * on @queue.
 *
 * For BR/EDR controllers the connection handle is used; for AMP controllers
 * the channel's own handle is used instead.  A fragmented skb (frag_list)
 * is enqueued atomically under the queue lock, with continuation fragments
 * re-flagged ACL_CONT.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Only the linear head is sent as the first fragment */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment becomes its own
		 * queued packet */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2662
/* Queue an ACL skb on its channel's data queue and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2675
/* Send SCO data */
/* Prepend a SCO header and queue the skb on the connection's data queue;
 * actual transmission happens from the TX worker. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2697
2698/* ---- HCI TX task (outgoing data) ---- */
2699
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * in-flight packets (fair scheduling), and compute its quota from the
 * controller's available buffer count.  *quote receives the number of
 * packets the caller may send (at least 1 when a connection is returned,
 * 0 otherwise).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the least outstanding data */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE may share the ACL buffer pool when the
			 * controller reports no dedicated LE buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2760
/* Transmit-timeout handler: disconnect every connection of @type that
 * still has unacknowledged packets, assuming the link has stalled. */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2781
/* Channel-level scheduler: among all channels on connections of @type,
 * select the one whose head packet has the highest priority, breaking
 * ties by the fewest packets in flight on the owning connection.
 *
 * *quote receives the per-channel send quota derived from the matching
 * controller buffer count.  Returns NULL when nothing is sendable.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness tracking
			 * at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2863
/* Anti-starvation pass run after a scheduling round: channels of @type that
 * sent nothing this round get their head packet promoted to just below
 * HCI_PRIO_MAX, while channels that did send have their counter reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: reset and skip */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2913
b71d385a
AE
2914static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2915{
2916 /* Calculate count of blocks used by this packet */
2917 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2918}
2919
/* Detect an ACL transmit stall: if no buffers are free and nothing has been
 * acknowledged for longer than HCI_ACL_TX_TIMEOUT, kill stalled links.
 * Skipped entirely for raw-mode devices. */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 2930
/* Packet-based ACL scheduler: drain channel queues while controller ACL
 * buffers (acl_cnt) remain, respecting per-channel quotas and head-packet
 * priority, then run the priority recalculation pass if anything was sent. */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance priorities for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2968
6039aa73 2969static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2970{
63d2bc1b 2971 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2972 struct hci_chan *chan;
2973 struct sk_buff *skb;
2974 int quote;
bd1eb66b 2975 u8 type;
b71d385a 2976
63d2bc1b 2977 __check_timeout(hdev, cnt);
b71d385a 2978
bd1eb66b
AE
2979 BT_DBG("%s", hdev->name);
2980
2981 if (hdev->dev_type == HCI_AMP)
2982 type = AMP_LINK;
2983 else
2984 type = ACL_LINK;
2985
b71d385a 2986 while (hdev->block_cnt > 0 &&
bd1eb66b 2987 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
2988 u32 priority = (skb_peek(&chan->data_q))->priority;
2989 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2990 int blocks;
2991
2992 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2993 skb->len, skb->priority);
b71d385a
AE
2994
2995 /* Stop if priority has changed */
2996 if (skb->priority < priority)
2997 break;
2998
2999 skb = skb_dequeue(&chan->data_q);
3000
3001 blocks = __get_blocks(hdev, skb);
3002 if (blocks > hdev->block_cnt)
3003 return;
3004
3005 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3006 bt_cb(skb)->force_active);
b71d385a
AE
3007
3008 hci_send_frame(skb);
3009 hdev->acl_last_tx = jiffies;
3010
3011 hdev->block_cnt -= blocks;
3012 quote -= blocks;
3013
3014 chan->sent += blocks;
3015 chan->conn->sent += blocks;
3016 }
3017 }
3018
3019 if (cnt != hdev->block_cnt)
bd1eb66b 3020 hci_prio_recalculate(hdev, type);
b71d385a
AE
3021}
3022
/* Top-level ACL scheduler: dispatch to the packet-based or block-based
 * variant according to the controller's flow control mode. */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
3045
1da177e4 3046/* Schedule SCO */
6039aa73 3047static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3048{
3049 struct hci_conn *conn;
3050 struct sk_buff *skb;
3051 int quote;
3052
3053 BT_DBG("%s", hdev->name);
3054
52087a79
LAD
3055 if (!hci_conn_num(hdev, SCO_LINK))
3056 return;
3057
1da177e4
LT
3058 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3059 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3060 BT_DBG("skb %p len %d", skb, skb->len);
3061 hci_send_frame(skb);
3062
3063 conn->sent++;
3064 if (conn->sent == ~0)
3065 conn->sent = 0;
3066 }
3067 }
3068}
3069
/* Schedule eSCO traffic: identical to the SCO scheduler but over ESCO_LINK
 * connections; shares the SCO buffer count (sco_cnt). */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			/* Wrap the in-flight counter before overflow */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3093
/* LE scheduler: drains LE channel queues with the same priority/quota logic
 * as ACL.  When the controller reports no dedicated LE buffers (le_pkts 0),
 * LE traffic shares the ACL buffer pool. */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the buffer pool: dedicated LE buffers or the shared ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining count back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3144
/* TX worker: runs all per-link-type schedulers in turn, then flushes any
 * queued raw (unknown type) packets straight to the driver. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
3167
25985edc 3168/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3169
/* ACL data packet */
/* Handle an incoming ACL data packet: strip the ACL header, look up the
 * connection by handle and hand the payload to L2CAP.  Packets for unknown
 * handles are logged and dropped. */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* handle field packs the 12-bit handle with PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol; l2cap takes ownership of the skb */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3205
/* SCO data packet */
/* Handle an incoming SCO data packet: strip the SCO header, look up the
 * connection by handle and hand the payload to the SCO layer.  Packets for
 * unknown handles are logged and dropped. */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol; sco layer takes ownership */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3236
9238f36a
JH
3237static bool hci_req_is_complete(struct hci_dev *hdev)
3238{
3239 struct sk_buff *skb;
3240
3241 skb = skb_peek(&hdev->cmd_q);
3242 if (!skb)
3243 return true;
3244
3245 return bt_cb(skb)->req.start;
3246}
3247
/* Re-queue a clone of the last sent command at the head of the command
 * queue.  Used to recover from controllers that emit a spontaneous reset
 * complete; an actual HCI_OP_RESET is never resent. */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Head of the queue so it is the next command transmitted */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3269
/* Process completion of an HCI command within an asynchronous request.
 *
 * @opcode: opcode of the command that completed
 * @status: its completion status (0 = success)
 *
 * When the completed command was the last of its request (or failed),
 * the request's complete callback is located — either on the sent command
 * itself or on one of the commands purged from the queue — and invoked.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the next request and put it
		 * back on the queue */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3327
/* RX worker: drain the device receive queue, mirroring each frame to the
 * monitor (and promiscuous sockets), then dispatching by packet type to
 * the event, ACL or SCO handlers.  Raw-mode devices consume everything
 * without processing; data packets are dropped while still in HCI_INIT. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
3382
/* TX workqueue handler for HCI commands: send the next queued command to
 * the controller if a command credit is available.
 *
 * cmd_cnt is the flow-control credit granted by the controller (via
 * Command Complete/Status events); only one outstanding command is kept
 * in hdev->sent_cmd so its completion callback can be matched later.
 * A watchdog timer (cmd_timer) catches controllers that never respond,
 * except during HCI_RESET where a late response is expected.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the response handler can inspect it */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry from the workqueue */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
3414
3415int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3416{
3417 /* General inquiry access code (GIAC) */
3418 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3419 struct hci_cp_inquiry cp;
3420
3421 BT_DBG("%s", hdev->name);
3422
3423 if (test_bit(HCI_INQUIRY, &hdev->flags))
3424 return -EINPROGRESS;
3425
4663262c
JH
3426 inquiry_cache_flush(hdev);
3427
2519a1fc
AG
3428 memset(&cp, 0, sizeof(cp));
3429 memcpy(&cp.lap, lap, sizeof(cp.lap));
3430 cp.length = length;
3431
3432 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3433}
023d5049
AG
3434
3435int hci_cancel_inquiry(struct hci_dev *hdev)
3436{
3437 BT_DBG("%s", hdev->name);
3438
3439 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3440 return -EALREADY;
023d5049
AG
3441
3442 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3443}
31f7956c
AG
3444
3445u8 bdaddr_to_le(u8 bdaddr_type)
3446{
3447 switch (bdaddr_type) {
3448 case BDADDR_LE_PUBLIC:
3449 return ADDR_LE_DEV_PUBLIC;
3450
3451 default:
3452 /* Fallback to LE Random address type */
3453 return ADDR_LE_DEV_RANDOM;
3454 }
3455}