Bluetooth: Use async requests internally in hci_req_sync
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device state event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
82/* Execute request and wait for completion. */
01178cd4 83static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
84 void (*func)(struct hci_request *req,
85 unsigned long opt),
01178cd4 86 unsigned long opt, __u32 timeout)
1da177e4 87{
42c6b129 88 struct hci_request req;
1da177e4
LT
89 DECLARE_WAITQUEUE(wait, current);
90 int err = 0;
91
92 BT_DBG("%s start", hdev->name);
93
42c6b129
JH
94 hci_req_init(&req, hdev);
95
1da177e4
LT
96 hdev->req_status = HCI_REQ_PEND;
97
98 add_wait_queue(&hdev->req_wait_q, &wait);
99 set_current_state(TASK_INTERRUPTIBLE);
100
42c6b129 101 func(&req, opt);
53cce22d 102
42c6b129
JH
103 err = hci_req_run(&req, hci_req_sync_complete);
104 if (err < 0) {
53cce22d
JH
105 hdev->req_status = 0;
106 remove_wait_queue(&hdev->req_wait_q, &wait);
42c6b129
JH
107 /* req_run will fail if the request did not add any
108 * commands to the queue, something that can happen when
109 * a request with conditionals doesn't trigger any
110 * commands to be sent. This is normal behavior and
111 * should not trigger an error return.
112 */
113 return 0;
53cce22d
JH
114 }
115
1da177e4
LT
116 schedule_timeout(timeout);
117
118 remove_wait_queue(&hdev->req_wait_q, &wait);
119
120 if (signal_pending(current))
121 return -EINTR;
122
123 switch (hdev->req_status) {
124 case HCI_REQ_DONE:
e175072f 125 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
126 break;
127
128 case HCI_REQ_CANCELED:
129 err = -hdev->req_result;
130 break;
131
132 default:
133 err = -ETIMEDOUT;
134 break;
3ff50b79 135 }
1da177e4 136
a5040efa 137 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
138
139 BT_DBG("%s end: err %d", hdev->name, err);
140
141 return err;
142}
143
01178cd4 144static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
145 void (*req)(struct hci_request *req,
146 unsigned long opt),
01178cd4 147 unsigned long opt, __u32 timeout)
1da177e4
LT
148{
149 int ret;
150
7c6a329e
MH
151 if (!test_bit(HCI_UP, &hdev->flags))
152 return -ENETDOWN;
153
1da177e4
LT
154 /* Serialize all requests */
155 hci_req_lock(hdev);
01178cd4 156 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
157 hci_req_unlock(hdev);
158
159 return ret;
160}
161
42c6b129 162static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 163{
42c6b129 164 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
165
166 /* Reset device */
42c6b129
JH
167 set_bit(HCI_RESET, &req->hdev->flags);
168 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
169}
170
42c6b129 171static void bredr_init(struct hci_request *req)
1da177e4 172{
42c6b129 173 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 174
1da177e4 175 /* Read Local Supported Features */
42c6b129 176 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 177
1143e5a6 178 /* Read Local Version */
42c6b129 179 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
180
181 /* Read BD Address */
42c6b129 182 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
183}
184
42c6b129 185static void amp_init(struct hci_request *req)
e61ef499 186{
42c6b129 187 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 188
e61ef499 189 /* Read Local Version */
42c6b129 190 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
191
192 /* Read Local AMP Info */
42c6b129 193 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
194
195 /* Read Data Blk size */
42c6b129 196 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
197}
198
42c6b129 199static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 200{
42c6b129
JH
201 struct hci_dev *hdev = req->hdev;
202 struct hci_request init_req;
e61ef499
AE
203 struct sk_buff *skb;
204
205 BT_DBG("%s %ld", hdev->name, opt);
206
207 /* Driver initialization */
208
42c6b129
JH
209 hci_req_init(&init_req, hdev);
210
e61ef499
AE
211 /* Special commands */
212 while ((skb = skb_dequeue(&hdev->driver_init))) {
213 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
214 skb->dev = (void *) hdev;
215
42c6b129
JH
216 if (skb_queue_empty(&init_req.cmd_q))
217 bt_cb(skb)->req.start = true;
218
219 skb_queue_tail(&init_req.cmd_q, skb);
e61ef499
AE
220 }
221 skb_queue_purge(&hdev->driver_init);
222
42c6b129
JH
223 hci_req_run(&init_req, NULL);
224
11778716
AE
225 /* Reset */
226 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 227 hci_reset_req(req, 0);
11778716 228
e61ef499
AE
229 switch (hdev->dev_type) {
230 case HCI_BREDR:
42c6b129 231 bredr_init(req);
e61ef499
AE
232 break;
233
234 case HCI_AMP:
42c6b129 235 amp_init(req);
e61ef499
AE
236 break;
237
238 default:
239 BT_ERR("Unknown device type %d", hdev->dev_type);
240 break;
241 }
e61ef499
AE
242}
243
42c6b129 244static void bredr_setup(struct hci_request *req)
2177bab5
JH
245{
246 struct hci_cp_delete_stored_link_key cp;
247 __le16 param;
248 __u8 flt_type;
249
250 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 251 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
252
253 /* Read Class of Device */
42c6b129 254 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
255
256 /* Read Local Name */
42c6b129 257 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
258
259 /* Read Voice Setting */
42c6b129 260 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5
JH
261
262 /* Clear Event Filters */
263 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 264 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
265
266 /* Connection accept timeout ~20 secs */
267 param = __constant_cpu_to_le16(0x7d00);
42c6b129 268 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5
JH
269
270 bacpy(&cp.bdaddr, BDADDR_ANY);
271 cp.delete_all = 0x01;
42c6b129 272 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
2177bab5
JH
273}
274
42c6b129 275static void le_setup(struct hci_request *req)
2177bab5
JH
276{
277 /* Read LE Buffer Size */
42c6b129 278 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
279
280 /* Read LE Local Supported Features */
42c6b129 281 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
282
283 /* Read LE Advertising Channel TX Power */
42c6b129 284 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
285
286 /* Read LE White List Size */
42c6b129 287 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
288
289 /* Read LE Supported States */
42c6b129 290 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
2177bab5
JH
291}
292
293static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
294{
295 if (lmp_ext_inq_capable(hdev))
296 return 0x02;
297
298 if (lmp_inq_rssi_capable(hdev))
299 return 0x01;
300
301 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
302 hdev->lmp_subver == 0x0757)
303 return 0x01;
304
305 if (hdev->manufacturer == 15) {
306 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
307 return 0x01;
308 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
309 return 0x01;
310 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
311 return 0x01;
312 }
313
314 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
315 hdev->lmp_subver == 0x1805)
316 return 0x01;
317
318 return 0x00;
319}
320
42c6b129 321static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
322{
323 u8 mode;
324
42c6b129 325 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 326
42c6b129 327 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
328}
329
42c6b129 330static void hci_setup_event_mask(struct hci_request *req)
2177bab5 331{
42c6b129
JH
332 struct hci_dev *hdev = req->hdev;
333
2177bab5
JH
334 /* The second byte is 0xff instead of 0x9f (two reserved bits
335 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
336 * command otherwise.
337 */
338 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
339
340 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
341 * any event mask for pre 1.2 devices.
342 */
343 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
344 return;
345
346 if (lmp_bredr_capable(hdev)) {
347 events[4] |= 0x01; /* Flow Specification Complete */
348 events[4] |= 0x02; /* Inquiry Result with RSSI */
349 events[4] |= 0x04; /* Read Remote Extended Features Complete */
350 events[5] |= 0x08; /* Synchronous Connection Complete */
351 events[5] |= 0x10; /* Synchronous Connection Changed */
352 }
353
354 if (lmp_inq_rssi_capable(hdev))
355 events[4] |= 0x02; /* Inquiry Result with RSSI */
356
357 if (lmp_sniffsubr_capable(hdev))
358 events[5] |= 0x20; /* Sniff Subrating */
359
360 if (lmp_pause_enc_capable(hdev))
361 events[5] |= 0x80; /* Encryption Key Refresh Complete */
362
363 if (lmp_ext_inq_capable(hdev))
364 events[5] |= 0x40; /* Extended Inquiry Result */
365
366 if (lmp_no_flush_capable(hdev))
367 events[7] |= 0x01; /* Enhanced Flush Complete */
368
369 if (lmp_lsto_capable(hdev))
370 events[6] |= 0x80; /* Link Supervision Timeout Changed */
371
372 if (lmp_ssp_capable(hdev)) {
373 events[6] |= 0x01; /* IO Capability Request */
374 events[6] |= 0x02; /* IO Capability Response */
375 events[6] |= 0x04; /* User Confirmation Request */
376 events[6] |= 0x08; /* User Passkey Request */
377 events[6] |= 0x10; /* Remote OOB Data Request */
378 events[6] |= 0x20; /* Simple Pairing Complete */
379 events[7] |= 0x04; /* User Passkey Notification */
380 events[7] |= 0x08; /* Keypress Notification */
381 events[7] |= 0x10; /* Remote Host Supported
382 * Features Notification
383 */
384 }
385
386 if (lmp_le_capable(hdev))
387 events[7] |= 0x20; /* LE Meta-Event */
388
42c6b129 389 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
390
391 if (lmp_le_capable(hdev)) {
392 memset(events, 0, sizeof(events));
393 events[0] = 0x1f;
42c6b129
JH
394 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
395 sizeof(events), events);
2177bab5
JH
396 }
397}
398
42c6b129 399static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 400{
42c6b129
JH
401 struct hci_dev *hdev = req->hdev;
402
2177bab5 403 if (lmp_bredr_capable(hdev))
42c6b129 404 bredr_setup(req);
2177bab5
JH
405
406 if (lmp_le_capable(hdev))
42c6b129 407 le_setup(req);
2177bab5 408
42c6b129 409 hci_setup_event_mask(req);
2177bab5
JH
410
411 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 412 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
413
414 if (lmp_ssp_capable(hdev)) {
415 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
416 u8 mode = 0x01;
42c6b129
JH
417 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
418 sizeof(mode), &mode);
2177bab5
JH
419 } else {
420 struct hci_cp_write_eir cp;
421
422 memset(hdev->eir, 0, sizeof(hdev->eir));
423 memset(&cp, 0, sizeof(cp));
424
42c6b129 425 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
426 }
427 }
428
429 if (lmp_inq_rssi_capable(hdev))
42c6b129 430 hci_setup_inquiry_mode(req);
2177bab5
JH
431
432 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 433 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
434
435 if (lmp_ext_feat_capable(hdev)) {
436 struct hci_cp_read_local_ext_features cp;
437
438 cp.page = 0x01;
42c6b129
JH
439 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
440 sizeof(cp), &cp);
2177bab5
JH
441 }
442
443 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
444 u8 enable = 1;
42c6b129
JH
445 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
446 &enable);
2177bab5
JH
447 }
448}
449
42c6b129 450static void hci_setup_link_policy(struct hci_request *req)
2177bab5 451{
42c6b129 452 struct hci_dev *hdev = req->hdev;
2177bab5
JH
453 struct hci_cp_write_def_link_policy cp;
454 u16 link_policy = 0;
455
456 if (lmp_rswitch_capable(hdev))
457 link_policy |= HCI_LP_RSWITCH;
458 if (lmp_hold_capable(hdev))
459 link_policy |= HCI_LP_HOLD;
460 if (lmp_sniff_capable(hdev))
461 link_policy |= HCI_LP_SNIFF;
462 if (lmp_park_capable(hdev))
463 link_policy |= HCI_LP_PARK;
464
465 cp.policy = cpu_to_le16(link_policy);
42c6b129 466 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
467}
468
42c6b129 469static void hci_set_le_support(struct hci_request *req)
2177bab5 470{
42c6b129 471 struct hci_dev *hdev = req->hdev;
2177bab5
JH
472 struct hci_cp_write_le_host_supported cp;
473
474 memset(&cp, 0, sizeof(cp));
475
476 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
477 cp.le = 0x01;
478 cp.simul = lmp_le_br_capable(hdev);
479 }
480
481 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
482 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
483 &cp);
2177bab5
JH
484}
485
42c6b129 486static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 487{
42c6b129
JH
488 struct hci_dev *hdev = req->hdev;
489
2177bab5 490 if (hdev->commands[5] & 0x10)
42c6b129 491 hci_setup_link_policy(req);
2177bab5
JH
492
493 if (lmp_le_capable(hdev))
42c6b129 494 hci_set_le_support(req);
2177bab5
JH
495}
496
497static int __hci_init(struct hci_dev *hdev)
498{
499 int err;
500
501 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
502 if (err < 0)
503 return err;
504
505 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
506 * BR/EDR/LE type controllers. AMP controllers only need the
507 * first stage init.
508 */
509 if (hdev->dev_type != HCI_BREDR)
510 return 0;
511
512 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
513 if (err < 0)
514 return err;
515
516 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
517}
518
42c6b129 519static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
520{
521 __u8 scan = opt;
522
42c6b129 523 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
524
525 /* Inquiry and Page scans */
42c6b129 526 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
527}
528
42c6b129 529static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
530{
531 __u8 auth = opt;
532
42c6b129 533 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
534
535 /* Authentication */
42c6b129 536 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
537}
538
42c6b129 539static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
540{
541 __u8 encrypt = opt;
542
42c6b129 543 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 544
e4e8e37c 545 /* Encryption */
42c6b129 546 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
547}
548
42c6b129 549static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
550{
551 __le16 policy = cpu_to_le16(opt);
552
42c6b129 553 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
554
555 /* Default link policy */
42c6b129 556 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
557}
558
8e87d142 559/* Get HCI device by index.
1da177e4
LT
560 * Device is held on return. */
561struct hci_dev *hci_dev_get(int index)
562{
8035ded4 563 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
564
565 BT_DBG("%d", index);
566
567 if (index < 0)
568 return NULL;
569
570 read_lock(&hci_dev_list_lock);
8035ded4 571 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
572 if (d->id == index) {
573 hdev = hci_dev_hold(d);
574 break;
575 }
576 }
577 read_unlock(&hci_dev_list_lock);
578 return hdev;
579}
1da177e4
LT
580
581/* ---- Inquiry support ---- */
ff9ef578 582
30dc78e1
JH
583bool hci_discovery_active(struct hci_dev *hdev)
584{
585 struct discovery_state *discov = &hdev->discovery;
586
6fbe195d 587 switch (discov->state) {
343f935b 588 case DISCOVERY_FINDING:
6fbe195d 589 case DISCOVERY_RESOLVING:
30dc78e1
JH
590 return true;
591
6fbe195d
AG
592 default:
593 return false;
594 }
30dc78e1
JH
595}
596
ff9ef578
JH
597void hci_discovery_set_state(struct hci_dev *hdev, int state)
598{
599 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
600
601 if (hdev->discovery.state == state)
602 return;
603
604 switch (state) {
605 case DISCOVERY_STOPPED:
7b99b659
AG
606 if (hdev->discovery.state != DISCOVERY_STARTING)
607 mgmt_discovering(hdev, 0);
ff9ef578
JH
608 break;
609 case DISCOVERY_STARTING:
610 break;
343f935b 611 case DISCOVERY_FINDING:
ff9ef578
JH
612 mgmt_discovering(hdev, 1);
613 break;
30dc78e1
JH
614 case DISCOVERY_RESOLVING:
615 break;
ff9ef578
JH
616 case DISCOVERY_STOPPING:
617 break;
618 }
619
620 hdev->discovery.state = state;
621}
622
1da177e4
LT
623static void inquiry_cache_flush(struct hci_dev *hdev)
624{
30883512 625 struct discovery_state *cache = &hdev->discovery;
b57c1a56 626 struct inquiry_entry *p, *n;
1da177e4 627
561aafbc
JH
628 list_for_each_entry_safe(p, n, &cache->all, all) {
629 list_del(&p->all);
b57c1a56 630 kfree(p);
1da177e4 631 }
561aafbc
JH
632
633 INIT_LIST_HEAD(&cache->unknown);
634 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
635}
636
a8c5fb1a
GP
637struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
638 bdaddr_t *bdaddr)
1da177e4 639{
30883512 640 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
641 struct inquiry_entry *e;
642
6ed93dc6 643 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 644
561aafbc
JH
645 list_for_each_entry(e, &cache->all, all) {
646 if (!bacmp(&e->data.bdaddr, bdaddr))
647 return e;
648 }
649
650 return NULL;
651}
652
653struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 654 bdaddr_t *bdaddr)
561aafbc 655{
30883512 656 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
657 struct inquiry_entry *e;
658
6ed93dc6 659 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
660
661 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 662 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
663 return e;
664 }
665
666 return NULL;
1da177e4
LT
667}
668
30dc78e1 669struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
670 bdaddr_t *bdaddr,
671 int state)
30dc78e1
JH
672{
673 struct discovery_state *cache = &hdev->discovery;
674 struct inquiry_entry *e;
675
6ed93dc6 676 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
677
678 list_for_each_entry(e, &cache->resolve, list) {
679 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
680 return e;
681 if (!bacmp(&e->data.bdaddr, bdaddr))
682 return e;
683 }
684
685 return NULL;
686}
687
a3d4e20a 688void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 689 struct inquiry_entry *ie)
a3d4e20a
JH
690{
691 struct discovery_state *cache = &hdev->discovery;
692 struct list_head *pos = &cache->resolve;
693 struct inquiry_entry *p;
694
695 list_del(&ie->list);
696
697 list_for_each_entry(p, &cache->resolve, list) {
698 if (p->name_state != NAME_PENDING &&
a8c5fb1a 699 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
700 break;
701 pos = &p->list;
702 }
703
704 list_add(&ie->list, pos);
705}
706
3175405b 707bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 708 bool name_known, bool *ssp)
1da177e4 709{
30883512 710 struct discovery_state *cache = &hdev->discovery;
70f23020 711 struct inquiry_entry *ie;
1da177e4 712
6ed93dc6 713 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 714
2b2fec4d
SJ
715 hci_remove_remote_oob_data(hdev, &data->bdaddr);
716
388fc8fa
JH
717 if (ssp)
718 *ssp = data->ssp_mode;
719
70f23020 720 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 721 if (ie) {
388fc8fa
JH
722 if (ie->data.ssp_mode && ssp)
723 *ssp = true;
724
a3d4e20a 725 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 726 data->rssi != ie->data.rssi) {
a3d4e20a
JH
727 ie->data.rssi = data->rssi;
728 hci_inquiry_cache_update_resolve(hdev, ie);
729 }
730
561aafbc 731 goto update;
a3d4e20a 732 }
561aafbc
JH
733
734 /* Entry not in the cache. Add new one. */
735 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
736 if (!ie)
3175405b 737 return false;
561aafbc
JH
738
739 list_add(&ie->all, &cache->all);
740
741 if (name_known) {
742 ie->name_state = NAME_KNOWN;
743 } else {
744 ie->name_state = NAME_NOT_KNOWN;
745 list_add(&ie->list, &cache->unknown);
746 }
70f23020 747
561aafbc
JH
748update:
749 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 750 ie->name_state != NAME_PENDING) {
561aafbc
JH
751 ie->name_state = NAME_KNOWN;
752 list_del(&ie->list);
1da177e4
LT
753 }
754
70f23020
AE
755 memcpy(&ie->data, data, sizeof(*data));
756 ie->timestamp = jiffies;
1da177e4 757 cache->timestamp = jiffies;
3175405b
JH
758
759 if (ie->name_state == NAME_NOT_KNOWN)
760 return false;
761
762 return true;
1da177e4
LT
763}
764
765static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
766{
30883512 767 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
768 struct inquiry_info *info = (struct inquiry_info *) buf;
769 struct inquiry_entry *e;
770 int copied = 0;
771
561aafbc 772 list_for_each_entry(e, &cache->all, all) {
1da177e4 773 struct inquiry_data *data = &e->data;
b57c1a56
JH
774
775 if (copied >= num)
776 break;
777
1da177e4
LT
778 bacpy(&info->bdaddr, &data->bdaddr);
779 info->pscan_rep_mode = data->pscan_rep_mode;
780 info->pscan_period_mode = data->pscan_period_mode;
781 info->pscan_mode = data->pscan_mode;
782 memcpy(info->dev_class, data->dev_class, 3);
783 info->clock_offset = data->clock_offset;
b57c1a56 784
1da177e4 785 info++;
b57c1a56 786 copied++;
1da177e4
LT
787 }
788
789 BT_DBG("cache %p, copied %d", cache, copied);
790 return copied;
791}
792
42c6b129 793static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
794{
795 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 796 struct hci_dev *hdev = req->hdev;
1da177e4
LT
797 struct hci_cp_inquiry cp;
798
799 BT_DBG("%s", hdev->name);
800
801 if (test_bit(HCI_INQUIRY, &hdev->flags))
802 return;
803
804 /* Start Inquiry */
805 memcpy(&cp.lap, &ir->lap, 3);
806 cp.length = ir->length;
807 cp.num_rsp = ir->num_rsp;
42c6b129 808 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
809}
810
811int hci_inquiry(void __user *arg)
812{
813 __u8 __user *ptr = arg;
814 struct hci_inquiry_req ir;
815 struct hci_dev *hdev;
816 int err = 0, do_inquiry = 0, max_rsp;
817 long timeo;
818 __u8 *buf;
819
820 if (copy_from_user(&ir, ptr, sizeof(ir)))
821 return -EFAULT;
822
5a08ecce
AE
823 hdev = hci_dev_get(ir.dev_id);
824 if (!hdev)
1da177e4
LT
825 return -ENODEV;
826
09fd0de5 827 hci_dev_lock(hdev);
8e87d142 828 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 829 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
830 inquiry_cache_flush(hdev);
831 do_inquiry = 1;
832 }
09fd0de5 833 hci_dev_unlock(hdev);
1da177e4 834
04837f64 835 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
836
837 if (do_inquiry) {
01178cd4
JH
838 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
839 timeo);
70f23020
AE
840 if (err < 0)
841 goto done;
842 }
1da177e4 843
8fc9ced3
GP
844 /* for unlimited number of responses we will use buffer with
845 * 255 entries
846 */
1da177e4
LT
847 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
848
849 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
850 * copy it to the user space.
851 */
01df8c31 852 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 853 if (!buf) {
1da177e4
LT
854 err = -ENOMEM;
855 goto done;
856 }
857
09fd0de5 858 hci_dev_lock(hdev);
1da177e4 859 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 860 hci_dev_unlock(hdev);
1da177e4
LT
861
862 BT_DBG("num_rsp %d", ir.num_rsp);
863
864 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
865 ptr += sizeof(ir);
866 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 867 ir.num_rsp))
1da177e4 868 err = -EFAULT;
8e87d142 869 } else
1da177e4
LT
870 err = -EFAULT;
871
872 kfree(buf);
873
874done:
875 hci_dev_put(hdev);
876 return err;
877}
878
3f0f524b
JH
879static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
880{
881 u8 ad_len = 0, flags = 0;
882 size_t name_len;
883
884 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
885 flags |= LE_AD_GENERAL;
886
887 if (!lmp_bredr_capable(hdev))
888 flags |= LE_AD_NO_BREDR;
889
890 if (lmp_le_br_capable(hdev))
891 flags |= LE_AD_SIM_LE_BREDR_CTRL;
892
893 if (lmp_host_le_br_capable(hdev))
894 flags |= LE_AD_SIM_LE_BREDR_HOST;
895
896 if (flags) {
897 BT_DBG("adv flags 0x%02x", flags);
898
899 ptr[0] = 2;
900 ptr[1] = EIR_FLAGS;
901 ptr[2] = flags;
902
903 ad_len += 3;
904 ptr += 3;
905 }
906
907 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
908 ptr[0] = 2;
909 ptr[1] = EIR_TX_POWER;
910 ptr[2] = (u8) hdev->adv_tx_power;
911
912 ad_len += 3;
913 ptr += 3;
914 }
915
916 name_len = strlen(hdev->dev_name);
917 if (name_len > 0) {
918 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
919
920 if (name_len > max_len) {
921 name_len = max_len;
922 ptr[1] = EIR_NAME_SHORT;
923 } else
924 ptr[1] = EIR_NAME_COMPLETE;
925
926 ptr[0] = name_len + 1;
927
928 memcpy(ptr + 2, hdev->dev_name, name_len);
929
930 ad_len += (name_len + 2);
931 ptr += (name_len + 2);
932 }
933
934 return ad_len;
935}
936
937int hci_update_ad(struct hci_dev *hdev)
938{
939 struct hci_cp_le_set_adv_data cp;
940 u8 len;
941 int err;
942
943 hci_dev_lock(hdev);
944
945 if (!lmp_le_capable(hdev)) {
946 err = -EINVAL;
947 goto unlock;
948 }
949
950 memset(&cp, 0, sizeof(cp));
951
952 len = create_ad(hdev, cp.data);
953
954 if (hdev->adv_data_len == len &&
955 memcmp(cp.data, hdev->adv_data, len) == 0) {
956 err = 0;
957 goto unlock;
958 }
959
960 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
961 hdev->adv_data_len = len;
962
963 cp.length = len;
964 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
965
966unlock:
967 hci_dev_unlock(hdev);
968
969 return err;
970}
971
1da177e4
LT
972/* ---- HCI ioctl helpers ---- */
973
974int hci_dev_open(__u16 dev)
975{
976 struct hci_dev *hdev;
977 int ret = 0;
978
5a08ecce
AE
979 hdev = hci_dev_get(dev);
980 if (!hdev)
1da177e4
LT
981 return -ENODEV;
982
983 BT_DBG("%s %p", hdev->name, hdev);
984
985 hci_req_lock(hdev);
986
94324962
JH
987 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
988 ret = -ENODEV;
989 goto done;
990 }
991
611b30f7
MH
992 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
993 ret = -ERFKILL;
994 goto done;
995 }
996
1da177e4
LT
997 if (test_bit(HCI_UP, &hdev->flags)) {
998 ret = -EALREADY;
999 goto done;
1000 }
1001
1002 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1003 set_bit(HCI_RAW, &hdev->flags);
1004
07e3b94a
AE
1005 /* Treat all non BR/EDR controllers as raw devices if
1006 enable_hs is not set */
1007 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
1008 set_bit(HCI_RAW, &hdev->flags);
1009
1da177e4
LT
1010 if (hdev->open(hdev)) {
1011 ret = -EIO;
1012 goto done;
1013 }
1014
1015 if (!test_bit(HCI_RAW, &hdev->flags)) {
1016 atomic_set(&hdev->cmd_cnt, 1);
1017 set_bit(HCI_INIT, &hdev->flags);
a5040efa 1018 hdev->init_last_cmd = 0;
1da177e4 1019
2177bab5 1020 ret = __hci_init(hdev);
1da177e4
LT
1021
1022 clear_bit(HCI_INIT, &hdev->flags);
1023 }
1024
1025 if (!ret) {
1026 hci_dev_hold(hdev);
1027 set_bit(HCI_UP, &hdev->flags);
1028 hci_notify(hdev, HCI_DEV_UP);
3f0f524b 1029 hci_update_ad(hdev);
bb4b2a9a
AE
1030 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1031 mgmt_valid_hdev(hdev)) {
09fd0de5 1032 hci_dev_lock(hdev);
744cf19e 1033 mgmt_powered(hdev, 1);
09fd0de5 1034 hci_dev_unlock(hdev);
56e5cb86 1035 }
8e87d142 1036 } else {
1da177e4 1037 /* Init failed, cleanup */
3eff45ea 1038 flush_work(&hdev->tx_work);
c347b765 1039 flush_work(&hdev->cmd_work);
b78752cc 1040 flush_work(&hdev->rx_work);
1da177e4
LT
1041
1042 skb_queue_purge(&hdev->cmd_q);
1043 skb_queue_purge(&hdev->rx_q);
1044
1045 if (hdev->flush)
1046 hdev->flush(hdev);
1047
1048 if (hdev->sent_cmd) {
1049 kfree_skb(hdev->sent_cmd);
1050 hdev->sent_cmd = NULL;
1051 }
1052
1053 hdev->close(hdev);
1054 hdev->flags = 0;
1055 }
1056
1057done:
1058 hci_req_unlock(hdev);
1059 hci_dev_put(hdev);
1060 return ret;
1061}
1062
/* Shut a device down that is currently up.
 *
 * Cancels all pending work, flushes the RX/TX/command work items,
 * tears down connections, optionally resets the controller, and
 * finally calls the driver's ->close(). The statement order here is
 * deliberate (works must be flushed before queues are purged, and the
 * command timer must be stopped before the sent command is freed) —
 * do not reorder without care.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just make sure the command timer is dead */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Tell mgmt about the power change unless we are being powered
	 * off automatically (HCI_AUTO_OFF) or the device is not
	 * mgmt-visible */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1155
1156int hci_dev_close(__u16 dev)
1157{
1158 struct hci_dev *hdev;
1159 int err;
1160
70f23020
AE
1161 hdev = hci_dev_get(dev);
1162 if (!hdev)
1da177e4 1163 return -ENODEV;
8ee56540
MH
1164
1165 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1166 cancel_delayed_work(&hdev->power_off);
1167
1da177e4 1168 err = hci_dev_do_close(hdev);
8ee56540 1169
1da177e4
LT
1170 hci_dev_put(hdev);
1171 return err;
1172}
1173
/* Reset a device that is already up: drop all queued traffic, flush
 * the inquiry cache and connection hash, and (unless the device is in
 * raw mode) issue a synchronous HCI_Reset.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and clear the per-link
	 * flow control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1211
1212int hci_dev_reset_stat(__u16 dev)
1213{
1214 struct hci_dev *hdev;
1215 int ret = 0;
1216
70f23020
AE
1217 hdev = hci_dev_get(dev);
1218 if (!hdev)
1da177e4
LT
1219 return -ENODEV;
1220
1221 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1222
1223 hci_dev_put(hdev);
1224
1225 return ret;
1226}
1227
/* Handle the device-configuration HCI ioctls (HCISETAUTH etc.).
 * dr.dev_opt carries the per-command argument from userspace.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	/* The remaining commands only update local state and need no
	 * controller round trip */
	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs the MTU in the upper 16 bits and the
		 * packet count in the lower 16 bits */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1302
/* HCIGETDEVLIST ioctl helper: copy up to dev_num {dev_id, flags}
 * pairs describing the registered controllers back to userspace.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace touched the device, so cancel the pending
		 * automatic power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects devices to be
		 * pairable by default */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1349
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for one
 * device and copy it to userspace.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;

	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields of the legacy structure */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}

	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1398
1399/* ---- Interface to HCI drivers ---- */
1400
611b30f7
MH
1401static int hci_rfkill_set_block(void *data, bool blocked)
1402{
1403 struct hci_dev *hdev = data;
1404
1405 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1406
1407 if (!blocked)
1408 return 0;
1409
1410 hci_dev_do_close(hdev);
1411
1412 return 0;
1413}
1414
/* rfkill operations: only blocking is acted upon, see
 * hci_rfkill_set_block() */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1418
ab81cbf9
JH
1419static void hci_power_on(struct work_struct *work)
1420{
1421 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1422
1423 BT_DBG("%s", hdev->name);
1424
1425 if (hci_dev_open(hdev->id) < 0)
1426 return;
1427
a8b2d5c2 1428 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
19202573
JH
1429 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1430 HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1431
a8b2d5c2 1432 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1433 mgmt_index_added(hdev);
ab81cbf9
JH
1434}
1435
1436static void hci_power_off(struct work_struct *work)
1437{
3243553f 1438 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1439 power_off.work);
ab81cbf9
JH
1440
1441 BT_DBG("%s", hdev->name);
1442
8ee56540 1443 hci_dev_do_close(hdev);
ab81cbf9
JH
1444}
1445
16ab91ab
JH
1446static void hci_discov_off(struct work_struct *work)
1447{
1448 struct hci_dev *hdev;
1449 u8 scan = SCAN_PAGE;
1450
1451 hdev = container_of(work, struct hci_dev, discov_off.work);
1452
1453 BT_DBG("%s", hdev->name);
1454
09fd0de5 1455 hci_dev_lock(hdev);
16ab91ab
JH
1456
1457 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1458
1459 hdev->discov_timeout = 0;
1460
09fd0de5 1461 hci_dev_unlock(hdev);
16ab91ab
JH
1462}
1463
2aeb9a1a
JH
1464int hci_uuids_clear(struct hci_dev *hdev)
1465{
4821002c 1466 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1467
4821002c
JH
1468 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1469 list_del(&uuid->list);
2aeb9a1a
JH
1470 kfree(uuid);
1471 }
1472
1473 return 0;
1474}
1475
55ed8ca1
JH
1476int hci_link_keys_clear(struct hci_dev *hdev)
1477{
1478 struct list_head *p, *n;
1479
1480 list_for_each_safe(p, n, &hdev->link_keys) {
1481 struct link_key *key;
1482
1483 key = list_entry(p, struct link_key, list);
1484
1485 list_del(p);
1486 kfree(key);
1487 }
1488
1489 return 0;
1490}
1491
b899efaf
VCG
1492int hci_smp_ltks_clear(struct hci_dev *hdev)
1493{
1494 struct smp_ltk *k, *tmp;
1495
1496 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1497 list_del(&k->list);
1498 kfree(k);
1499 }
1500
1501 return 0;
1502}
1503
55ed8ca1
JH
1504struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1505{
8035ded4 1506 struct link_key *k;
55ed8ca1 1507
8035ded4 1508 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1509 if (bacmp(bdaddr, &k->bdaddr) == 0)
1510 return k;
55ed8ca1
JH
1511
1512 return NULL;
1513}
1514
745c0ce3 1515static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1516 u8 key_type, u8 old_key_type)
d25e28ab
JH
1517{
1518 /* Legacy key */
1519 if (key_type < 0x03)
745c0ce3 1520 return true;
d25e28ab
JH
1521
1522 /* Debug keys are insecure so don't store them persistently */
1523 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1524 return false;
d25e28ab
JH
1525
1526 /* Changed combination key and there's no previous one */
1527 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1528 return false;
d25e28ab
JH
1529
1530 /* Security mode 3 case */
1531 if (!conn)
745c0ce3 1532 return true;
d25e28ab
JH
1533
1534 /* Neither local nor remote side had no-bonding as requirement */
1535 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1536 return true;
d25e28ab
JH
1537
1538 /* Local side had dedicated bonding as requirement */
1539 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1540 return true;
d25e28ab
JH
1541
1542 /* Remote side had dedicated bonding as requirement */
1543 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1544 return true;
d25e28ab
JH
1545
1546 /* If none of the above criteria match, then don't store the key
1547 * persistently */
745c0ce3 1548 return false;
d25e28ab
JH
1549}
1550
c9839a11 1551struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1552{
c9839a11 1553 struct smp_ltk *k;
75d262c2 1554
c9839a11
VCG
1555 list_for_each_entry(k, &hdev->long_term_keys, list) {
1556 if (k->ediv != ediv ||
a8c5fb1a 1557 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1558 continue;
1559
c9839a11 1560 return k;
75d262c2
VCG
1561 }
1562
1563 return NULL;
1564}
75d262c2 1565
c9839a11 1566struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1567 u8 addr_type)
75d262c2 1568{
c9839a11 1569 struct smp_ltk *k;
75d262c2 1570
c9839a11
VCG
1571 list_for_each_entry(k, &hdev->long_term_keys, list)
1572 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1573 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1574 return k;
1575
1576 return NULL;
1577}
75d262c2 1578
d25e28ab 1579int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1580 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1581{
1582 struct link_key *key, *old_key;
745c0ce3
VA
1583 u8 old_key_type;
1584 bool persistent;
55ed8ca1
JH
1585
1586 old_key = hci_find_link_key(hdev, bdaddr);
1587 if (old_key) {
1588 old_key_type = old_key->type;
1589 key = old_key;
1590 } else {
12adcf3a 1591 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1592 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1593 if (!key)
1594 return -ENOMEM;
1595 list_add(&key->list, &hdev->link_keys);
1596 }
1597
6ed93dc6 1598 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1599
d25e28ab
JH
1600 /* Some buggy controller combinations generate a changed
1601 * combination key for legacy pairing even when there's no
1602 * previous key */
1603 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1604 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1605 type = HCI_LK_COMBINATION;
655fe6ec
JH
1606 if (conn)
1607 conn->key_type = type;
1608 }
d25e28ab 1609
55ed8ca1 1610 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1611 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1612 key->pin_len = pin_len;
1613
b6020ba0 1614 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1615 key->type = old_key_type;
4748fed2
JH
1616 else
1617 key->type = type;
1618
4df378a1
JH
1619 if (!new_key)
1620 return 0;
1621
1622 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1623
744cf19e 1624 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1625
6ec5bcad
VA
1626 if (conn)
1627 conn->flush_key = !persistent;
55ed8ca1
JH
1628
1629 return 0;
1630}
1631
c9839a11 1632int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1633 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1634 ediv, u8 rand[8])
75d262c2 1635{
c9839a11 1636 struct smp_ltk *key, *old_key;
75d262c2 1637
c9839a11
VCG
1638 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1639 return 0;
75d262c2 1640
c9839a11
VCG
1641 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1642 if (old_key)
75d262c2 1643 key = old_key;
c9839a11
VCG
1644 else {
1645 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1646 if (!key)
1647 return -ENOMEM;
c9839a11 1648 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1649 }
1650
75d262c2 1651 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1652 key->bdaddr_type = addr_type;
1653 memcpy(key->val, tk, sizeof(key->val));
1654 key->authenticated = authenticated;
1655 key->ediv = ediv;
1656 key->enc_size = enc_size;
1657 key->type = type;
1658 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1659
c9839a11
VCG
1660 if (!new_key)
1661 return 0;
75d262c2 1662
261cc5aa
VCG
1663 if (type & HCI_SMP_LTK)
1664 mgmt_new_ltk(hdev, key, 1);
1665
75d262c2
VCG
1666 return 0;
1667}
1668
55ed8ca1
JH
1669int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1670{
1671 struct link_key *key;
1672
1673 key = hci_find_link_key(hdev, bdaddr);
1674 if (!key)
1675 return -ENOENT;
1676
6ed93dc6 1677 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1678
1679 list_del(&key->list);
1680 kfree(key);
1681
1682 return 0;
1683}
1684
b899efaf
VCG
1685int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1686{
1687 struct smp_ltk *k, *tmp;
1688
1689 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1690 if (bacmp(bdaddr, &k->bdaddr))
1691 continue;
1692
6ed93dc6 1693 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1694
1695 list_del(&k->list);
1696 kfree(k);
1697 }
1698
1699 return 0;
1700}
1701
6bd32326 1702/* HCI command timer function */
bda4f23a 1703static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1704{
1705 struct hci_dev *hdev = (void *) arg;
1706
bda4f23a
AE
1707 if (hdev->sent_cmd) {
1708 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1709 u16 opcode = __le16_to_cpu(sent->opcode);
1710
1711 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1712 } else {
1713 BT_ERR("%s command tx timeout", hdev->name);
1714 }
1715
6bd32326 1716 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1717 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1718}
1719
2763eda6 1720struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1721 bdaddr_t *bdaddr)
2763eda6
SJ
1722{
1723 struct oob_data *data;
1724
1725 list_for_each_entry(data, &hdev->remote_oob_data, list)
1726 if (bacmp(bdaddr, &data->bdaddr) == 0)
1727 return data;
1728
1729 return NULL;
1730}
1731
1732int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1733{
1734 struct oob_data *data;
1735
1736 data = hci_find_remote_oob_data(hdev, bdaddr);
1737 if (!data)
1738 return -ENOENT;
1739
6ed93dc6 1740 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1741
1742 list_del(&data->list);
1743 kfree(data);
1744
1745 return 0;
1746}
1747
1748int hci_remote_oob_data_clear(struct hci_dev *hdev)
1749{
1750 struct oob_data *data, *n;
1751
1752 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1753 list_del(&data->list);
1754 kfree(data);
1755 }
1756
1757 return 0;
1758}
1759
1760int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1761 u8 *randomizer)
2763eda6
SJ
1762{
1763 struct oob_data *data;
1764
1765 data = hci_find_remote_oob_data(hdev, bdaddr);
1766
1767 if (!data) {
1768 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1769 if (!data)
1770 return -ENOMEM;
1771
1772 bacpy(&data->bdaddr, bdaddr);
1773 list_add(&data->list, &hdev->remote_oob_data);
1774 }
1775
1776 memcpy(data->hash, hash, sizeof(data->hash));
1777 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1778
6ed93dc6 1779 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1780
1781 return 0;
1782}
1783
04124681 1784struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1785{
8035ded4 1786 struct bdaddr_list *b;
b2a66aad 1787
8035ded4 1788 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1789 if (bacmp(bdaddr, &b->bdaddr) == 0)
1790 return b;
b2a66aad
AJ
1791
1792 return NULL;
1793}
1794
1795int hci_blacklist_clear(struct hci_dev *hdev)
1796{
1797 struct list_head *p, *n;
1798
1799 list_for_each_safe(p, n, &hdev->blacklist) {
1800 struct bdaddr_list *b;
1801
1802 b = list_entry(p, struct bdaddr_list, list);
1803
1804 list_del(p);
1805 kfree(b);
1806 }
1807
1808 return 0;
1809}
1810
88c1fe4b 1811int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1812{
1813 struct bdaddr_list *entry;
b2a66aad
AJ
1814
1815 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1816 return -EBADF;
1817
5e762444
AJ
1818 if (hci_blacklist_lookup(hdev, bdaddr))
1819 return -EEXIST;
b2a66aad
AJ
1820
1821 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1822 if (!entry)
1823 return -ENOMEM;
b2a66aad
AJ
1824
1825 bacpy(&entry->bdaddr, bdaddr);
1826
1827 list_add(&entry->list, &hdev->blacklist);
1828
88c1fe4b 1829 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1830}
1831
88c1fe4b 1832int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1833{
1834 struct bdaddr_list *entry;
b2a66aad 1835
1ec918ce 1836 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1837 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1838
1839 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1840 if (!entry)
5e762444 1841 return -ENOENT;
b2a66aad
AJ
1842
1843 list_del(&entry->list);
1844 kfree(entry);
1845
88c1fe4b 1846 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1847}
1848
42c6b129 1849static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1850{
1851 struct le_scan_params *param = (struct le_scan_params *) opt;
1852 struct hci_cp_le_set_scan_param cp;
1853
1854 memset(&cp, 0, sizeof(cp));
1855 cp.type = param->type;
1856 cp.interval = cpu_to_le16(param->interval);
1857 cp.window = cpu_to_le16(param->window);
1858
42c6b129 1859 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1860}
1861
42c6b129 1862static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1863{
1864 struct hci_cp_le_set_scan_enable cp;
1865
1866 memset(&cp, 0, sizeof(cp));
1867 cp.enable = 1;
0431a43c 1868 cp.filter_dup = 1;
7ba8b4be 1869
42c6b129 1870 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1871}
1872
/* Start an LE scan and schedule its automatic disable after @timeout
 * milliseconds. The scan-parameter and scan-enable requests are run
 * synchronously under the request lock.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arrange for scanning to be switched off after @timeout ms */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
1906
7dbfac1d
AG
/* Abort a timed LE scan started by hci_do_le_scan(). If the deferred
 * scan-disable work was still pending, cancel it and tell the
 * controller to stop scanning right away.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1924
7ba8b4be
AG
1925static void le_scan_disable_work(struct work_struct *work)
1926{
1927 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1928 le_scan_disable.work);
7ba8b4be
AG
1929 struct hci_cp_le_set_scan_enable cp;
1930
1931 BT_DBG("%s", hdev->name);
1932
1933 memset(&cp, 0, sizeof(cp));
1934
1935 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1936}
1937
28b75a89
AG
1938static void le_scan_work(struct work_struct *work)
1939{
1940 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1941 struct le_scan_params *param = &hdev->le_scan_params;
1942
1943 BT_DBG("%s", hdev->name);
1944
04124681
GP
1945 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1946 param->timeout);
28b75a89
AG
1947}
1948
1949int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 1950 int timeout)
28b75a89
AG
1951{
1952 struct le_scan_params *param = &hdev->le_scan_params;
1953
1954 BT_DBG("%s", hdev->name);
1955
f1550478
JH
1956 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1957 return -ENOTSUPP;
1958
28b75a89
AG
1959 if (work_busy(&hdev->le_scan))
1960 return -EINPROGRESS;
1961
1962 param->type = type;
1963 param->interval = interval;
1964 param->window = window;
1965 param->timeout = timeout;
1966
1967 queue_work(system_long_wq, &hdev->le_scan);
1968
1969 return 0;
1970}
1971
9be0dab7
DH
/* Alloc HCI device.
 *
 * Allocates and pre-initializes a struct hci_dev: conservative packet
 * type / MTU defaults (refined once the controller reports its real
 * capabilities during init), all key/blacklist/OOB lists, the work
 * items, the queues and the command timer. Freed via hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands that never get a completion event */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2027
/* Free HCI device allocated with hci_alloc_dev(). The struct itself
 * is released by the device core once the last reference is dropped.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2037
1da177e4
LT
/* Register HCI device.
 *
 * Allocates an index, adds the device to the global list, creates its
 * workqueues, sysfs entries and rfkill switch, and queues the initial
 * power-on. Returns the assigned index on success or a negative errno;
 * on error everything set up so far is unwound in reverse order.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2126
/* Unregister HCI device.
 *
 * Removes the device from the global list, closes it, cancels all
 * pending work, tears down rfkill/sysfs/workqueues, clears all stored
 * keys and finally drops the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell mgmt unless the device never finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2185
/* Suspend HCI device: notify registered listeners only. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2193
/* Resume HCI device: notify registered listeners only. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2201
76bca880
MH
/* Receive frame from HCI drivers.
 *
 * Frames are only accepted while the device is up or still
 * initializing; otherwise the skb is dropped. Accepted frames are
 * timestamped and queued for the RX work item.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2224
33e882a5 2225static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2226 int count, __u8 index)
33e882a5
SS
2227{
2228 int len = 0;
2229 int hlen = 0;
2230 int remain = count;
2231 struct sk_buff *skb;
2232 struct bt_skb_cb *scb;
2233
2234 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2235 index >= NUM_REASSEMBLY)
33e882a5
SS
2236 return -EILSEQ;
2237
2238 skb = hdev->reassembly[index];
2239
2240 if (!skb) {
2241 switch (type) {
2242 case HCI_ACLDATA_PKT:
2243 len = HCI_MAX_FRAME_SIZE;
2244 hlen = HCI_ACL_HDR_SIZE;
2245 break;
2246 case HCI_EVENT_PKT:
2247 len = HCI_MAX_EVENT_SIZE;
2248 hlen = HCI_EVENT_HDR_SIZE;
2249 break;
2250 case HCI_SCODATA_PKT:
2251 len = HCI_MAX_SCO_SIZE;
2252 hlen = HCI_SCO_HDR_SIZE;
2253 break;
2254 }
2255
1e429f38 2256 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2257 if (!skb)
2258 return -ENOMEM;
2259
2260 scb = (void *) skb->cb;
2261 scb->expect = hlen;
2262 scb->pkt_type = type;
2263
2264 skb->dev = (void *) hdev;
2265 hdev->reassembly[index] = skb;
2266 }
2267
2268 while (count) {
2269 scb = (void *) skb->cb;
89bb46d0 2270 len = min_t(uint, scb->expect, count);
33e882a5
SS
2271
2272 memcpy(skb_put(skb, len), data, len);
2273
2274 count -= len;
2275 data += len;
2276 scb->expect -= len;
2277 remain = count;
2278
2279 switch (type) {
2280 case HCI_EVENT_PKT:
2281 if (skb->len == HCI_EVENT_HDR_SIZE) {
2282 struct hci_event_hdr *h = hci_event_hdr(skb);
2283 scb->expect = h->plen;
2284
2285 if (skb_tailroom(skb) < scb->expect) {
2286 kfree_skb(skb);
2287 hdev->reassembly[index] = NULL;
2288 return -ENOMEM;
2289 }
2290 }
2291 break;
2292
2293 case HCI_ACLDATA_PKT:
2294 if (skb->len == HCI_ACL_HDR_SIZE) {
2295 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2296 scb->expect = __le16_to_cpu(h->dlen);
2297
2298 if (skb_tailroom(skb) < scb->expect) {
2299 kfree_skb(skb);
2300 hdev->reassembly[index] = NULL;
2301 return -ENOMEM;
2302 }
2303 }
2304 break;
2305
2306 case HCI_SCODATA_PKT:
2307 if (skb->len == HCI_SCO_HDR_SIZE) {
2308 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2309 scb->expect = h->dlen;
2310
2311 if (skb_tailroom(skb) < scb->expect) {
2312 kfree_skb(skb);
2313 hdev->reassembly[index] = NULL;
2314 return -ENOMEM;
2315 }
2316 }
2317 break;
2318 }
2319
2320 if (scb->expect == 0) {
2321 /* Complete frame */
2322
2323 bt_cb(skb)->pkt_type = type;
2324 hci_recv_frame(skb);
2325
2326 hdev->reassembly[index] = NULL;
2327 return remain;
2328 }
2329 }
2330
2331 return remain;
2332}
2333
ef222013
MH
2334int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2335{
f39a3c06
SS
2336 int rem = 0;
2337
ef222013
MH
2338 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2339 return -EILSEQ;
2340
da5f6c37 2341 while (count) {
1e429f38 2342 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2343 if (rem < 0)
2344 return rem;
ef222013 2345
f39a3c06
SS
2346 data += (count - rem);
2347 count = rem;
f81c6224 2348 }
ef222013 2349
f39a3c06 2350 return rem;
ef222013
MH
2351}
2352EXPORT_SYMBOL(hci_recv_fragment);
2353
/* Slot index reserved for untyped stream reassembly (e.g. UART-style
 * transports where the packet-type indicator byte is in-band). */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes into the reassembler.
 *
 * If no frame is in progress, the first byte of @data is consumed as
 * the HCI packet-type indicator; otherwise the in-progress frame's type
 * is reused. Returns unconsumed byte count or a negative error from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: peel off the type byte */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2388
1da177e4
LT
2389/* ---- Interface to upper protocols ---- */
2390
1da177e4
LT
/* Register an upper-protocol callback set on the global hci_cb_list.
 *
 * Protected by hci_cb_list_lock; always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2402
/* Remove a previously registered callback set from hci_cb_list.
 *
 * Protected by hci_cb_list_lock; always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2414
/* Push one fully-formed HCI frame to the transport driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel and (when in
 * promiscuous mode) to raw HCI sockets, then orphans it and calls the
 * driver's send hook, which takes ownership. Returns the driver's
 * return value, or -ENODEV if the skb carries no hci_dev.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2442
3119ae95
JH
/* Initialize an asynchronous HCI request: empty command queue bound
 * to @hdev. Must be called before hci_req_add()/hci_req_run(). */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
}
2448
/* Submit a built-up asynchronous request for execution.
 *
 * Attaches @complete to the last queued command (it fires when the
 * whole request finishes — see hci_req_cmd_complete()), splices the
 * request's commands onto the device command queue under the queue's
 * irqsave lock, and kicks cmd_work. Returns -EINVAL for an empty
 * request, 0 otherwise.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -EINVAL;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2472
/* Build an skb containing an HCI command packet: header (little-endian
 * opcode + parameter length) followed by @plen bytes of @param.
 *
 * Uses GFP_ATOMIC since callers may run in non-sleeping context.
 * Returns NULL on allocation failure. Note plen is truncated into the
 * header's one-byte plen field; callers pass protocol-sized parameters.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2498
/* Send HCI command.
 *
 * Builds a stand-alone command packet, records it as the last init
 * command while HCI_INIT is set, queues it on cmd_q and kicks the
 * command worker. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2525
/* Queue a command to an asynchronous HCI request.
 *
 * The first command added to a request gets req.start = true so the
 * completion logic can find request boundaries on the device queue.
 * Returns 0 or -ENOMEM.
 */
int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);

	return 0;
}
2547
1da177e4 2548/* Get data from the previously sent command */
a9de9248 2549void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2550{
2551 struct hci_command_hdr *hdr;
2552
2553 if (!hdev->sent_cmd)
2554 return NULL;
2555
2556 hdr = (void *) hdev->sent_cmd->data;
2557
a9de9248 2558 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2559 return NULL;
2560
f0e09510 2561 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2562
2563 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2564}
2565
/* Send ACL data */
/* Prepend an ACL header (packed handle+flags, little-endian length)
 * to @skb in place. Caller must guarantee headroom for the push. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2578
ee22be7e 2579static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2580 struct sk_buff *skb, __u16 flags)
1da177e4 2581{
ee22be7e 2582 struct hci_conn *conn = chan->conn;
1da177e4
LT
2583 struct hci_dev *hdev = conn->hdev;
2584 struct sk_buff *list;
2585
087bfd99
GP
2586 skb->len = skb_headlen(skb);
2587 skb->data_len = 0;
2588
2589 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2590
2591 switch (hdev->dev_type) {
2592 case HCI_BREDR:
2593 hci_add_acl_hdr(skb, conn->handle, flags);
2594 break;
2595 case HCI_AMP:
2596 hci_add_acl_hdr(skb, chan->handle, flags);
2597 break;
2598 default:
2599 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2600 return;
2601 }
087bfd99 2602
70f23020
AE
2603 list = skb_shinfo(skb)->frag_list;
2604 if (!list) {
1da177e4
LT
2605 /* Non fragmented */
2606 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2607
73d80deb 2608 skb_queue_tail(queue, skb);
1da177e4
LT
2609 } else {
2610 /* Fragmented */
2611 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2612
2613 skb_shinfo(skb)->frag_list = NULL;
2614
2615 /* Queue all fragments atomically */
af3e6359 2616 spin_lock(&queue->lock);
1da177e4 2617
73d80deb 2618 __skb_queue_tail(queue, skb);
e702112f
AE
2619
2620 flags &= ~ACL_START;
2621 flags |= ACL_CONT;
1da177e4
LT
2622 do {
2623 skb = list; list = list->next;
8e87d142 2624
1da177e4 2625 skb->dev = (void *) hdev;
0d48d939 2626 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2627 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2628
2629 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2630
73d80deb 2631 __skb_queue_tail(queue, skb);
1da177e4
LT
2632 } while (list);
2633
af3e6359 2634 spin_unlock(&queue->lock);
1da177e4 2635 }
73d80deb
LAD
2636}
2637
/* Queue ACL data on a channel's data queue and schedule the TX worker.
 * Actual transmission happens asynchronously in hci_tx_work(). */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2650
/* Send SCO data */
/* Prepend a SCO header (handle + one-byte length) and queue the skb on
 * the connection's data queue; the TX worker transmits it later.
 * Caller supplies payloads that fit the 8-bit dlen field. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2672
2673/* ---- HCI TX task (outgoing data) ---- */
2674
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * in-flight packets (fair scheduling), and compute its send quota.
 *
 * Iterates the connection hash under RCU. *quote is set to the per-
 * connection share of the available controller buffer credits (at
 * least 1 when a connection is chosen, 0 otherwise). Returns the
 * chosen connection or NULL.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Credit pool depends on link type; LE may share the
		 * ACL pool when the controller has no LE buffers. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2735
6039aa73 2736static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2737{
2738 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2739 struct hci_conn *c;
1da177e4 2740
bae1f5d9 2741 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2742
bf4c6325
GP
2743 rcu_read_lock();
2744
1da177e4 2745 /* Kill stalled connections */
bf4c6325 2746 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2747 if (c->type == type && c->sent) {
6ed93dc6
AE
2748 BT_ERR("%s killing stalled connection %pMR",
2749 hdev->name, &c->dst);
bed71748 2750 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2751 }
2752 }
bf4c6325
GP
2753
2754 rcu_read_unlock();
1da177e4
LT
2755}
2756
/* Channel-level scheduler: among all channels on connections of @type,
 * pick one whose head skb has the highest priority; ties broken by
 * fewest packets in flight on the owning connection.
 *
 * Iterates under RCU. When the highest seen priority increases, the
 * candidate count and minimum are reset so only same-priority channels
 * compete. *quote receives the per-channel share of the relevant
 * credit pool (>= 1). Returns the chosen channel or NULL.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Credit pool selection mirrors hci_low_sent() */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2838
/* Anti-starvation pass after a scheduling round: for every channel of
 * @type that sent nothing this round (chan->sent == 0) but has queued
 * data, promote its head skb to HCI_PRIO_MAX - 1 so it competes next
 * round. Channels that did send get their per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: clear counter
			 * and leave its priorities alone. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2888
/* Number of controller data blocks consumed by @skb's ACL payload
 * (block-based flow control; header bytes are excluded). */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2894
6039aa73 2895static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2896{
1da177e4
LT
2897 if (!test_bit(HCI_RAW, &hdev->flags)) {
2898 /* ACL tx timeout must be longer than maximum
2899 * link supervision timeout (40.9 seconds) */
63d2bc1b 2900 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2901 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2902 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2903 }
63d2bc1b 2904}
1da177e4 2905
6039aa73 2906static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2907{
2908 unsigned int cnt = hdev->acl_cnt;
2909 struct hci_chan *chan;
2910 struct sk_buff *skb;
2911 int quote;
2912
2913 __check_timeout(hdev, cnt);
04837f64 2914
73d80deb 2915 while (hdev->acl_cnt &&
a8c5fb1a 2916 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2917 u32 priority = (skb_peek(&chan->data_q))->priority;
2918 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2919 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2920 skb->len, skb->priority);
73d80deb 2921
ec1cce24
LAD
2922 /* Stop if priority has changed */
2923 if (skb->priority < priority)
2924 break;
2925
2926 skb = skb_dequeue(&chan->data_q);
2927
73d80deb 2928 hci_conn_enter_active_mode(chan->conn,
04124681 2929 bt_cb(skb)->force_active);
04837f64 2930
1da177e4
LT
2931 hci_send_frame(skb);
2932 hdev->acl_last_tx = jiffies;
2933
2934 hdev->acl_cnt--;
73d80deb
LAD
2935 chan->sent++;
2936 chan->conn->sent++;
1da177e4
LT
2937 }
2938 }
02b20f0b
LAD
2939
2940 if (cnt != hdev->acl_cnt)
2941 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2942}
2943
6039aa73 2944static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2945{
63d2bc1b 2946 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2947 struct hci_chan *chan;
2948 struct sk_buff *skb;
2949 int quote;
bd1eb66b 2950 u8 type;
b71d385a 2951
63d2bc1b 2952 __check_timeout(hdev, cnt);
b71d385a 2953
bd1eb66b
AE
2954 BT_DBG("%s", hdev->name);
2955
2956 if (hdev->dev_type == HCI_AMP)
2957 type = AMP_LINK;
2958 else
2959 type = ACL_LINK;
2960
b71d385a 2961 while (hdev->block_cnt > 0 &&
bd1eb66b 2962 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
2963 u32 priority = (skb_peek(&chan->data_q))->priority;
2964 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2965 int blocks;
2966
2967 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2968 skb->len, skb->priority);
b71d385a
AE
2969
2970 /* Stop if priority has changed */
2971 if (skb->priority < priority)
2972 break;
2973
2974 skb = skb_dequeue(&chan->data_q);
2975
2976 blocks = __get_blocks(hdev, skb);
2977 if (blocks > hdev->block_cnt)
2978 return;
2979
2980 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2981 bt_cb(skb)->force_active);
b71d385a
AE
2982
2983 hci_send_frame(skb);
2984 hdev->acl_last_tx = jiffies;
2985
2986 hdev->block_cnt -= blocks;
2987 quote -= blocks;
2988
2989 chan->sent += blocks;
2990 chan->conn->sent += blocks;
2991 }
2992 }
2993
2994 if (cnt != hdev->block_cnt)
bd1eb66b 2995 hci_prio_recalculate(hdev, type);
b71d385a
AE
2996}
2997
6039aa73 2998static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2999{
3000 BT_DBG("%s", hdev->name);
3001
bd1eb66b
AE
3002 /* No ACL link over BR/EDR controller */
3003 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3004 return;
3005
3006 /* No AMP link over AMP controller */
3007 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3008 return;
3009
3010 switch (hdev->flow_ctl_mode) {
3011 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3012 hci_sched_acl_pkt(hdev);
3013 break;
3014
3015 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3016 hci_sched_acl_blk(hdev);
3017 break;
3018 }
3019}
3020
1da177e4 3021/* Schedule SCO */
6039aa73 3022static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3023{
3024 struct hci_conn *conn;
3025 struct sk_buff *skb;
3026 int quote;
3027
3028 BT_DBG("%s", hdev->name);
3029
52087a79
LAD
3030 if (!hci_conn_num(hdev, SCO_LINK))
3031 return;
3032
1da177e4
LT
3033 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3034 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3035 BT_DBG("skb %p len %d", skb, skb->len);
3036 hci_send_frame(skb);
3037
3038 conn->sent++;
3039 if (conn->sent == ~0)
3040 conn->sent = 0;
3041 }
3042 }
3043}
3044
6039aa73 3045static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3046{
3047 struct hci_conn *conn;
3048 struct sk_buff *skb;
3049 int quote;
3050
3051 BT_DBG("%s", hdev->name);
3052
52087a79
LAD
3053 if (!hci_conn_num(hdev, ESCO_LINK))
3054 return;
3055
8fc9ced3
GP
3056 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3057 &quote))) {
b6a0dc82
MH
3058 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3059 BT_DBG("skb %p len %d", skb, skb->len);
3060 hci_send_frame(skb);
3061
3062 conn->sent++;
3063 if (conn->sent == ~0)
3064 conn->sent = 0;
3065 }
3066 }
3067}
3068
6039aa73 3069static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3070{
73d80deb 3071 struct hci_chan *chan;
6ed58ec5 3072 struct sk_buff *skb;
02b20f0b 3073 int quote, cnt, tmp;
6ed58ec5
VT
3074
3075 BT_DBG("%s", hdev->name);
3076
52087a79
LAD
3077 if (!hci_conn_num(hdev, LE_LINK))
3078 return;
3079
6ed58ec5
VT
3080 if (!test_bit(HCI_RAW, &hdev->flags)) {
3081 /* LE tx timeout must be longer than maximum
3082 * link supervision timeout (40.9 seconds) */
bae1f5d9 3083 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3084 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3085 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3086 }
3087
3088 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3089 tmp = cnt;
73d80deb 3090 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3091 u32 priority = (skb_peek(&chan->data_q))->priority;
3092 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3093 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3094 skb->len, skb->priority);
6ed58ec5 3095
ec1cce24
LAD
3096 /* Stop if priority has changed */
3097 if (skb->priority < priority)
3098 break;
3099
3100 skb = skb_dequeue(&chan->data_q);
3101
6ed58ec5
VT
3102 hci_send_frame(skb);
3103 hdev->le_last_tx = jiffies;
3104
3105 cnt--;
73d80deb
LAD
3106 chan->sent++;
3107 chan->conn->sent++;
6ed58ec5
VT
3108 }
3109 }
73d80deb 3110
6ed58ec5
VT
3111 if (hdev->le_pkts)
3112 hdev->le_cnt = cnt;
3113 else
3114 hdev->acl_cnt = cnt;
02b20f0b
LAD
3115
3116 if (cnt != tmp)
3117 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3118}
3119
3eff45ea 3120static void hci_tx_work(struct work_struct *work)
1da177e4 3121{
3eff45ea 3122 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3123 struct sk_buff *skb;
3124
6ed58ec5 3125 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3126 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
3127
3128 /* Schedule queues and send stuff to HCI driver */
3129
3130 hci_sched_acl(hdev);
3131
3132 hci_sched_sco(hdev);
3133
b6a0dc82
MH
3134 hci_sched_esco(hdev);
3135
6ed58ec5
VT
3136 hci_sched_le(hdev);
3137
1da177e4
LT
3138 /* Send next queued raw (unknown type) packet */
3139 while ((skb = skb_dequeue(&hdev->raw_q)))
3140 hci_send_frame(skb);
1da177e4
LT
3141}
3142
25985edc 3143/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3144
/* ACL data packet */
/* RX path for ACL data: unpack handle/flags, look up the connection,
 * bump it to active mode and hand the payload to L2CAP. Packets for
 * unknown handles are logged and dropped. Consumes @skb. */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3180
/* SCO data packet */
/* RX path for SCO data: look up the connection by handle and hand the
 * payload to the SCO layer; unknown handles are logged and the skb
 * dropped. Consumes @skb. */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3211
9238f36a
JH
3212static bool hci_req_is_complete(struct hci_dev *hdev)
3213{
3214 struct sk_buff *skb;
3215
3216 skb = skb_peek(&hdev->cmd_q);
3217 if (!skb)
3218 return true;
3219
3220 return bt_cb(skb)->req.start;
3221}
3222
/* Re-queue a clone of the last sent command at the head of cmd_q.
 *
 * Used when a controller spontaneously resets during init, which
 * orphans the outstanding command. HCI_OP_RESET itself is never
 * resent (the spontaneous reset already happened). Silently does
 * nothing if there is no sent command or cloning fails.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3244
/* Handle Command Complete for @opcode with @status on behalf of the
 * async-request machinery.
 *
 * If the event doesn't match the outstanding command it is ignored,
 * except for the CSR spontaneous-reset workaround which resends the
 * last command. A request ends when it failed (@status != 0) or no
 * further commands of the same request remain queued; the request's
 * complete callback is then located either on sent_cmd (last command
 * case) or by draining this request's remaining commands from cmd_q
 * under its lock.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Next request's first command: put it back and stop */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3302
/* Handle Command Status for @opcode.
 *
 * A failure status terminates the whole request via
 * hci_req_cmd_complete(). Success is only meaningful for the last
 * command of a request; even then, the request is forwarded as
 * complete only when no complete callback exists or the queue is
 * non-empty (per the comment below).
 */
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
3328
b78752cc 3329static void hci_rx_work(struct work_struct *work)
1da177e4 3330{
b78752cc 3331 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3332 struct sk_buff *skb;
3333
3334 BT_DBG("%s", hdev->name);
3335
1da177e4 3336 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3337 /* Send copy to monitor */
3338 hci_send_to_monitor(hdev, skb);
3339
1da177e4
LT
3340 if (atomic_read(&hdev->promisc)) {
3341 /* Send copy to the sockets */
470fe1b5 3342 hci_send_to_sock(hdev, skb);
1da177e4
LT
3343 }
3344
3345 if (test_bit(HCI_RAW, &hdev->flags)) {
3346 kfree_skb(skb);
3347 continue;
3348 }
3349
3350 if (test_bit(HCI_INIT, &hdev->flags)) {
3351 /* Don't process data packets in this states. */
0d48d939 3352 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3353 case HCI_ACLDATA_PKT:
3354 case HCI_SCODATA_PKT:
3355 kfree_skb(skb);
3356 continue;
3ff50b79 3357 }
1da177e4
LT
3358 }
3359
3360 /* Process frame */
0d48d939 3361 switch (bt_cb(skb)->pkt_type) {
1da177e4 3362 case HCI_EVENT_PKT:
b78752cc 3363 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3364 hci_event_packet(hdev, skb);
3365 break;
3366
3367 case HCI_ACLDATA_PKT:
3368 BT_DBG("%s ACL data packet", hdev->name);
3369 hci_acldata_packet(hdev, skb);
3370 break;
3371
3372 case HCI_SCODATA_PKT:
3373 BT_DBG("%s SCO data packet", hdev->name);
3374 hci_scodata_packet(hdev, skb);
3375 break;
3376
3377 default:
3378 kfree_skb(skb);
3379 break;
3380 }
3381 }
1da177e4
LT
3382}
3383
c347b765 3384static void hci_cmd_work(struct work_struct *work)
1da177e4 3385{
c347b765 3386 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3387 struct sk_buff *skb;
3388
2104786b
AE
3389 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3390 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3391
1da177e4 3392 /* Send queued commands */
5a08ecce
AE
3393 if (atomic_read(&hdev->cmd_cnt)) {
3394 skb = skb_dequeue(&hdev->cmd_q);
3395 if (!skb)
3396 return;
3397
7585b97a 3398 kfree_skb(hdev->sent_cmd);
1da177e4 3399
70f23020
AE
3400 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3401 if (hdev->sent_cmd) {
1da177e4
LT
3402 atomic_dec(&hdev->cmd_cnt);
3403 hci_send_frame(skb);
7bdb8a5c
SJ
3404 if (test_bit(HCI_RESET, &hdev->flags))
3405 del_timer(&hdev->cmd_timer);
3406 else
3407 mod_timer(&hdev->cmd_timer,
5f246e89 3408 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3409 } else {
3410 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3411 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3412 }
3413 }
3414}
2519a1fc
AG
3415
3416int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3417{
3418 /* General inquiry access code (GIAC) */
3419 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3420 struct hci_cp_inquiry cp;
3421
3422 BT_DBG("%s", hdev->name);
3423
3424 if (test_bit(HCI_INQUIRY, &hdev->flags))
3425 return -EINPROGRESS;
3426
4663262c
JH
3427 inquiry_cache_flush(hdev);
3428
2519a1fc
AG
3429 memset(&cp, 0, sizeof(cp));
3430 memcpy(&cp.lap, lap, sizeof(cp.lap));
3431 cp.length = length;
3432
3433 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3434}
023d5049
AG
3435
3436int hci_cancel_inquiry(struct hci_dev *hdev)
3437{
3438 BT_DBG("%s", hdev->name);
3439
3440 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3441 return -EALREADY;
023d5049
AG
3442
3443 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3444}
31f7956c
AG
3445
3446u8 bdaddr_to_le(u8 bdaddr_type)
3447{
3448 switch (bdaddr_type) {
3449 case BDADDR_LE_PUBLIC:
3450 return ADDR_LE_DEV_PUBLIC;
3451
3452 default:
3453 /* Fallback to LE Random address type */
3454 return ADDR_LE_DEV_RANDOM;
3455 }
3456}