/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Broadcast a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the HCI
 * socket layer, which relays it to listening user-space sockets.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
/* Execute request and wait for completion.
 *
 * Builds a request via @func, submits it, then sleeps until
 * hci_req_sync_complete()/hci_req_cancel() wakes us, a signal arrives,
 * or @timeout (in jiffies) expires. Caller must hold the req lock
 * (see hci_req_sync()). Returns 0 or a negative errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Must be marked pending before the request runs so the completion
	 * callback observes it even if the controller answers immediately.
	 */
	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its HCI commands onto the request. */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Map the controller's HCI status code to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here. */
		err = -hdev->req_result;
		break;

	default:
		/* Neither completed nor canceled: the wait timed out. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
145
01178cd4 146static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
147 void (*req)(struct hci_request *req,
148 unsigned long opt),
01178cd4 149 unsigned long opt, __u32 timeout)
1da177e4
LT
150{
151 int ret;
152
7c6a329e
MH
153 if (!test_bit(HCI_UP, &hdev->flags))
154 return -ENETDOWN;
155
1da177e4
LT
156 /* Serialize all requests */
157 hci_req_lock(hdev);
01178cd4 158 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
159 hci_req_unlock(hdev);
160
161 return ret;
162}
163
42c6b129 164static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 165{
42c6b129 166 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
167
168 /* Reset device */
42c6b129
JH
169 set_bit(HCI_RESET, &req->hdev->flags);
170 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
171}
172
42c6b129 173static void bredr_init(struct hci_request *req)
1da177e4 174{
42c6b129 175 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 176
1da177e4 177 /* Read Local Supported Features */
42c6b129 178 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 179
1143e5a6 180 /* Read Local Version */
42c6b129 181 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
182
183 /* Read BD Address */
42c6b129 184 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
185}
186
42c6b129 187static void amp_init(struct hci_request *req)
e61ef499 188{
42c6b129 189 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 190
e61ef499 191 /* Read Local Version */
42c6b129 192 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
193
194 /* Read Local AMP Info */
42c6b129 195 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
196
197 /* Read Data Blk size */
42c6b129 198 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
199}
200
/* First stage of controller initialization: flush any driver-provided
 * "special" command skbs to the controller, optionally reset it, then
 * queue the transport-specific identity reads.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands: pre-built command skbs queued by the driver
	 * before the stack runs. They are moved onto a private request
	 * and submitted immediately, ahead of the normal init commands.
	 */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		/* The first command of a request must carry the start
		 * marker so the completion tracking works.
		 */
		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	/* NOTE(review): return value intentionally ignored; an empty
	 * driver_init queue makes hci_req_run() return -ENODATA, which
	 * is the normal case.
	 */
	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
245
42c6b129 246static void bredr_setup(struct hci_request *req)
2177bab5
JH
247{
248 struct hci_cp_delete_stored_link_key cp;
249 __le16 param;
250 __u8 flt_type;
251
252 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 253 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
254
255 /* Read Class of Device */
42c6b129 256 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
257
258 /* Read Local Name */
42c6b129 259 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
260
261 /* Read Voice Setting */
42c6b129 262 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5
JH
263
264 /* Clear Event Filters */
265 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 266 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
267
268 /* Connection accept timeout ~20 secs */
269 param = __constant_cpu_to_le16(0x7d00);
42c6b129 270 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5
JH
271
272 bacpy(&cp.bdaddr, BDADDR_ANY);
273 cp.delete_all = 0x01;
42c6b129 274 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
2177bab5
JH
275}
276
42c6b129 277static void le_setup(struct hci_request *req)
2177bab5
JH
278{
279 /* Read LE Buffer Size */
42c6b129 280 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
281
282 /* Read LE Local Supported Features */
42c6b129 283 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
284
285 /* Read LE Advertising Channel TX Power */
42c6b129 286 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
287
288 /* Read LE White List Size */
42c6b129 289 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
290
291 /* Read LE Supported States */
42c6b129 292 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
2177bab5
JH
293}
294
295static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
296{
297 if (lmp_ext_inq_capable(hdev))
298 return 0x02;
299
300 if (lmp_inq_rssi_capable(hdev))
301 return 0x01;
302
303 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
304 hdev->lmp_subver == 0x0757)
305 return 0x01;
306
307 if (hdev->manufacturer == 15) {
308 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
309 return 0x01;
310 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
311 return 0x01;
312 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
313 return 0x01;
314 }
315
316 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
317 hdev->lmp_subver == 0x1805)
318 return 0x01;
319
320 return 0x00;
321}
322
42c6b129 323static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
324{
325 u8 mode;
326
42c6b129 327 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 328
42c6b129 329 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
330}
331
/* Build and queue the Set Event Mask command (and the LE variant when
 * applicable), enabling only events the controller's feature set can
 * actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* LE event mask: enable the low five LE meta-events. */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
400
/* Second stage of controller initialization: transport setup, event
 * mask, SSP/EIR configuration and feature-dependent reads. Commands are
 * queued conditionally based on the features learned in stage one.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* Read Local Supported Commands is only defined from 1.2 on. */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear the EIR data instead. */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		/* Read extended features page 1 (host features). */
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
451
42c6b129 452static void hci_setup_link_policy(struct hci_request *req)
2177bab5 453{
42c6b129 454 struct hci_dev *hdev = req->hdev;
2177bab5
JH
455 struct hci_cp_write_def_link_policy cp;
456 u16 link_policy = 0;
457
458 if (lmp_rswitch_capable(hdev))
459 link_policy |= HCI_LP_RSWITCH;
460 if (lmp_hold_capable(hdev))
461 link_policy |= HCI_LP_HOLD;
462 if (lmp_sniff_capable(hdev))
463 link_policy |= HCI_LP_SNIFF;
464 if (lmp_park_capable(hdev))
465 link_policy |= HCI_LP_PARK;
466
467 cp.policy = cpu_to_le16(link_policy);
42c6b129 468 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
469}
470
42c6b129 471static void hci_set_le_support(struct hci_request *req)
2177bab5 472{
42c6b129 473 struct hci_dev *hdev = req->hdev;
2177bab5
JH
474 struct hci_cp_write_le_host_supported cp;
475
476 memset(&cp, 0, sizeof(cp));
477
478 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
479 cp.le = 0x01;
480 cp.simul = lmp_le_br_capable(hdev);
481 }
482
483 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
484 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
485 &cp);
2177bab5
JH
486}
487
42c6b129 488static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 489{
42c6b129
JH
490 struct hci_dev *hdev = req->hdev;
491
2177bab5 492 if (hdev->commands[5] & 0x10)
42c6b129 493 hci_setup_link_policy(req);
2177bab5
JH
494
495 if (lmp_le_capable(hdev))
42c6b129 496 hci_set_le_support(req);
2177bab5
JH
497}
498
499static int __hci_init(struct hci_dev *hdev)
500{
501 int err;
502
503 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
504 if (err < 0)
505 return err;
506
507 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
508 * BR/EDR/LE type controllers. AMP controllers only need the
509 * first stage init.
510 */
511 if (hdev->dev_type != HCI_BREDR)
512 return 0;
513
514 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
515 if (err < 0)
516 return err;
517
518 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
519}
520
42c6b129 521static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
522{
523 __u8 scan = opt;
524
42c6b129 525 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
526
527 /* Inquiry and Page scans */
42c6b129 528 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
529}
530
42c6b129 531static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
532{
533 __u8 auth = opt;
534
42c6b129 535 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
536
537 /* Authentication */
42c6b129 538 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
539}
540
42c6b129 541static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
542{
543 __u8 encrypt = opt;
544
42c6b129 545 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 546
e4e8e37c 547 /* Encryption */
42c6b129 548 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
549}
550
42c6b129 551static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
552{
553 __le16 policy = cpu_to_le16(opt);
554
42c6b129 555 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
556
557 /* Default link policy */
42c6b129 558 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
559}
560
8e87d142 561/* Get HCI device by index.
1da177e4
LT
562 * Device is held on return. */
563struct hci_dev *hci_dev_get(int index)
564{
8035ded4 565 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
566
567 BT_DBG("%d", index);
568
569 if (index < 0)
570 return NULL;
571
572 read_lock(&hci_dev_list_lock);
8035ded4 573 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
574 if (d->id == index) {
575 hdev = hci_dev_hold(d);
576 break;
577 }
578 }
579 read_unlock(&hci_dev_list_lock);
580 return hdev;
581}
1da177e4
LT
582
583/* ---- Inquiry support ---- */
ff9ef578 584
30dc78e1
JH
585bool hci_discovery_active(struct hci_dev *hdev)
586{
587 struct discovery_state *discov = &hdev->discovery;
588
6fbe195d 589 switch (discov->state) {
343f935b 590 case DISCOVERY_FINDING:
6fbe195d 591 case DISCOVERY_RESOLVING:
30dc78e1
JH
592 return true;
593
6fbe195d
AG
594 default:
595 return false;
596 }
30dc78e1
JH
597}
598
ff9ef578
JH
599void hci_discovery_set_state(struct hci_dev *hdev, int state)
600{
601 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
602
603 if (hdev->discovery.state == state)
604 return;
605
606 switch (state) {
607 case DISCOVERY_STOPPED:
7b99b659
AG
608 if (hdev->discovery.state != DISCOVERY_STARTING)
609 mgmt_discovering(hdev, 0);
ff9ef578
JH
610 break;
611 case DISCOVERY_STARTING:
612 break;
343f935b 613 case DISCOVERY_FINDING:
ff9ef578
JH
614 mgmt_discovering(hdev, 1);
615 break;
30dc78e1
JH
616 case DISCOVERY_RESOLVING:
617 break;
ff9ef578
JH
618 case DISCOVERY_STOPPING:
619 break;
620 }
621
622 hdev->discovery.state = state;
623}
624
1da177e4
LT
625static void inquiry_cache_flush(struct hci_dev *hdev)
626{
30883512 627 struct discovery_state *cache = &hdev->discovery;
b57c1a56 628 struct inquiry_entry *p, *n;
1da177e4 629
561aafbc
JH
630 list_for_each_entry_safe(p, n, &cache->all, all) {
631 list_del(&p->all);
b57c1a56 632 kfree(p);
1da177e4 633 }
561aafbc
JH
634
635 INIT_LIST_HEAD(&cache->unknown);
636 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
637}
638
a8c5fb1a
GP
639struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
640 bdaddr_t *bdaddr)
1da177e4 641{
30883512 642 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
643 struct inquiry_entry *e;
644
6ed93dc6 645 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 646
561aafbc
JH
647 list_for_each_entry(e, &cache->all, all) {
648 if (!bacmp(&e->data.bdaddr, bdaddr))
649 return e;
650 }
651
652 return NULL;
653}
654
655struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 656 bdaddr_t *bdaddr)
561aafbc 657{
30883512 658 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
659 struct inquiry_entry *e;
660
6ed93dc6 661 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
662
663 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 664 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
665 return e;
666 }
667
668 return NULL;
1da177e4
LT
669}
670
30dc78e1 671struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
672 bdaddr_t *bdaddr,
673 int state)
30dc78e1
JH
674{
675 struct discovery_state *cache = &hdev->discovery;
676 struct inquiry_entry *e;
677
6ed93dc6 678 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
679
680 list_for_each_entry(e, &cache->resolve, list) {
681 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
682 return e;
683 if (!bacmp(&e->data.bdaddr, bdaddr))
684 return e;
685 }
686
687 return NULL;
688}
689
a3d4e20a 690void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 691 struct inquiry_entry *ie)
a3d4e20a
JH
692{
693 struct discovery_state *cache = &hdev->discovery;
694 struct list_head *pos = &cache->resolve;
695 struct inquiry_entry *p;
696
697 list_del(&ie->list);
698
699 list_for_each_entry(p, &cache->resolve, list) {
700 if (p->name_state != NAME_PENDING &&
a8c5fb1a 701 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
702 break;
703 pos = &p->list;
704 }
705
706 list_add(&ie->list, pos);
707}
708
/* Insert or refresh an inquiry result in the cache.
 *
 * @name_known: caller already knows the remote name for this result.
 * @ssp: out-parameter, set when the remote indicated SSP support.
 *
 * Returns false when the entry could not be allocated or its name is
 * still unknown (i.e. a name request may be needed), true otherwise.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data. */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP knowledge from an earlier result sticks. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* An RSSI change re-sorts the pending name-resolve list. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN and drop off the unknown list, unless a
	 * name request is already in flight for this entry.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
766
767static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
768{
30883512 769 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
770 struct inquiry_info *info = (struct inquiry_info *) buf;
771 struct inquiry_entry *e;
772 int copied = 0;
773
561aafbc 774 list_for_each_entry(e, &cache->all, all) {
1da177e4 775 struct inquiry_data *data = &e->data;
b57c1a56
JH
776
777 if (copied >= num)
778 break;
779
1da177e4
LT
780 bacpy(&info->bdaddr, &data->bdaddr);
781 info->pscan_rep_mode = data->pscan_rep_mode;
782 info->pscan_period_mode = data->pscan_period_mode;
783 info->pscan_mode = data->pscan_mode;
784 memcpy(info->dev_class, data->dev_class, 3);
785 info->clock_offset = data->clock_offset;
b57c1a56 786
1da177e4 787 info++;
b57c1a56 788 copied++;
1da177e4
LT
789 }
790
791 BT_DBG("cache %p, copied %d", cache, copied);
792 return copied;
793}
794
42c6b129 795static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
796{
797 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 798 struct hci_dev *hdev = req->hdev;
1da177e4
LT
799 struct hci_cp_inquiry cp;
800
801 BT_DBG("%s", hdev->name);
802
803 if (test_bit(HCI_INQUIRY, &hdev->flags))
804 return;
805
806 /* Start Inquiry */
807 memcpy(&cp.lap, &ir->lap, 3);
808 cp.length = ir->length;
809 cp.num_rsp = ir->num_rsp;
42c6b129 810 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
811}
812
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (when the
 * cache is stale/empty or a flush was requested), then copy the cached
 * results back to user space after the updated request structure.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; 2000ms per unit over-estimates so
	 * the controller's own inquiry-complete event arrives first.
	 */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the request header (with the real num_rsp) followed
	 * by the result array.
	 */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
880
/* Build LE advertising data into @ptr (flags, TX power, then the local
 * name, shortened if necessary to fit HCI_MAX_AD_LENGTH). Returns the
 * number of bytes written. @ptr must be at least HCI_MAX_AD_LENGTH.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	/* Each AD structure is: length byte, type byte, payload. */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Remaining room minus the two AD header bytes. */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
938
939int hci_update_ad(struct hci_dev *hdev)
940{
941 struct hci_cp_le_set_adv_data cp;
942 u8 len;
943 int err;
944
945 hci_dev_lock(hdev);
946
947 if (!lmp_le_capable(hdev)) {
948 err = -EINVAL;
949 goto unlock;
950 }
951
952 memset(&cp, 0, sizeof(cp));
953
954 len = create_ad(hdev, cp.data);
955
956 if (hdev->adv_data_len == len &&
957 memcmp(cp.data, hdev->adv_data, len) == 0) {
958 err = 0;
959 goto unlock;
960 }
961
962 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
963 hdev->adv_data_len = len;
964
965 cp.length = len;
966 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
967
968unlock:
969 hci_dev_unlock(hdev);
970
971 return err;
972}
973
1da177e4
LT
974/* ---- HCI ioctl helpers ---- */
975
/* HCIDEVUP: power on and initialize an HCI device by index.
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or a failure from __hci_init()).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Ask the driver to open the transport. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the stack-driven controller initialization. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1061
/* Power down an HCI device: cancel deferred work, flush queues and
 * workers, optionally reset the controller, notify mgmt, and close the
 * driver transport. Ordering matters throughout; always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	/* Abort any synchronous request still waiting for the controller. */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device already down: just stop the command timer. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Skip the mgmt "powered off" event for auto-off transitions. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open(). */
	hci_dev_put(hdev);
	return 0;
}
1154
1155int hci_dev_close(__u16 dev)
1156{
1157 struct hci_dev *hdev;
1158 int err;
1159
70f23020
AE
1160 hdev = hci_dev_get(dev);
1161 if (!hdev)
1da177e4 1162 return -ENODEV;
8ee56540
MH
1163
1164 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1165 cancel_delayed_work(&hdev->power_off);
1166
1da177e4 1167 err = hci_dev_do_close(hdev);
8ee56540 1168
1da177e4
LT
1169 hci_dev_put(hdev);
1170 return err;
1171}
1172
/* Ioctl: soft-reset a running controller — drop queued traffic, flush
 * caches and connections, reset flow-control counters and (unless in
 * raw mode) issue an HCI Reset to the hardware.
 * Returns 0, -ENODEV, or the reset request's error code.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to do if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command flow control and clear all packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1210
1211int hci_dev_reset_stat(__u16 dev)
1212{
1213 struct hci_dev *hdev;
1214 int ret = 0;
1215
70f23020
AE
1216 hdev = hci_dev_get(dev);
1217 if (!hdev)
1da177e4
LT
1218 return -ENODEV;
1219
1220 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1221
1222 hci_dev_put(hdev);
1223
1224 return ret;
1225}
1226
/* Ioctl dispatcher for per-device HCI settings (auth, encryption,
 * scan mode, link policy/mode, packet types, MTUs).
 * @arg points to a userspace struct hci_dev_req.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: high half = MTU,
		 * low half = packet count (historic ioctl ABI) */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1301
/* Ioctl: copy the list of registered controllers (id + flags) to
 * userspace. @arg points to a struct hci_dev_list_req whose dev_num
 * field caps how many entries the caller's buffer can hold.
 * Returns 0, -EFAULT, -EINVAL or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* A userspace query cancels any pending auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1348
/* Ioctl: fill a struct hci_dev_info for one controller and copy it to
 * userspace. LE-only controllers report their LE buffer settings in
 * the ACL fields. Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A userspace query cancels any pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: reuse ACL fields for the LE buffer info */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1397
1398/* ---- Interface to HCI drivers ---- */
1399
611b30f7
MH
1400static int hci_rfkill_set_block(void *data, bool blocked)
1401{
1402 struct hci_dev *hdev = data;
1403
1404 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1405
1406 if (!blocked)
1407 return 0;
1408
1409 hci_dev_do_close(hdev);
1410
1411 return 0;
1412}
1413
/* rfkill operations: only the block transition is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1417
ab81cbf9
JH
/* Deferred power-on work: open the device, arm the auto power-off
 * timer when HCI_AUTO_OFF is set, and announce the new index to mgmt
 * once initial setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1434
1435static void hci_power_off(struct work_struct *work)
1436{
3243553f 1437 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1438 power_off.work);
ab81cbf9
JH
1439
1440 BT_DBG("%s", hdev->name);
1441
8ee56540 1442 hci_dev_do_close(hdev);
ab81cbf9
JH
1443}
1444
16ab91ab
JH
1445static void hci_discov_off(struct work_struct *work)
1446{
1447 struct hci_dev *hdev;
1448 u8 scan = SCAN_PAGE;
1449
1450 hdev = container_of(work, struct hci_dev, discov_off.work);
1451
1452 BT_DBG("%s", hdev->name);
1453
09fd0de5 1454 hci_dev_lock(hdev);
16ab91ab
JH
1455
1456 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1457
1458 hdev->discov_timeout = 0;
1459
09fd0de5 1460 hci_dev_unlock(hdev);
16ab91ab
JH
1461}
1462
2aeb9a1a
JH
1463int hci_uuids_clear(struct hci_dev *hdev)
1464{
4821002c 1465 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1466
4821002c
JH
1467 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1468 list_del(&uuid->list);
2aeb9a1a
JH
1469 kfree(uuid);
1470 }
1471
1472 return 0;
1473}
1474
55ed8ca1
JH
1475int hci_link_keys_clear(struct hci_dev *hdev)
1476{
1477 struct list_head *p, *n;
1478
1479 list_for_each_safe(p, n, &hdev->link_keys) {
1480 struct link_key *key;
1481
1482 key = list_entry(p, struct link_key, list);
1483
1484 list_del(p);
1485 kfree(key);
1486 }
1487
1488 return 0;
1489}
1490
b899efaf
VCG
1491int hci_smp_ltks_clear(struct hci_dev *hdev)
1492{
1493 struct smp_ltk *k, *tmp;
1494
1495 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1496 list_del(&k->list);
1497 kfree(k);
1498 }
1499
1500 return 0;
1501}
1502
55ed8ca1
JH
1503struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1504{
8035ded4 1505 struct link_key *k;
55ed8ca1 1506
8035ded4 1507 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1508 if (bacmp(bdaddr, &k->bdaddr) == 0)
1509 return k;
55ed8ca1
JH
1510
1511 return NULL;
1512}
1513
745c0ce3 1514static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1515 u8 key_type, u8 old_key_type)
d25e28ab
JH
1516{
1517 /* Legacy key */
1518 if (key_type < 0x03)
745c0ce3 1519 return true;
d25e28ab
JH
1520
1521 /* Debug keys are insecure so don't store them persistently */
1522 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1523 return false;
d25e28ab
JH
1524
1525 /* Changed combination key and there's no previous one */
1526 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1527 return false;
d25e28ab
JH
1528
1529 /* Security mode 3 case */
1530 if (!conn)
745c0ce3 1531 return true;
d25e28ab
JH
1532
1533 /* Neither local nor remote side had no-bonding as requirement */
1534 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1535 return true;
d25e28ab
JH
1536
1537 /* Local side had dedicated bonding as requirement */
1538 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1539 return true;
d25e28ab
JH
1540
1541 /* Remote side had dedicated bonding as requirement */
1542 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1543 return true;
d25e28ab
JH
1544
1545 /* If none of the above criteria match, then don't store the key
1546 * persistently */
745c0ce3 1547 return false;
d25e28ab
JH
1548}
1549
c9839a11 1550struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1551{
c9839a11 1552 struct smp_ltk *k;
75d262c2 1553
c9839a11
VCG
1554 list_for_each_entry(k, &hdev->long_term_keys, list) {
1555 if (k->ediv != ediv ||
a8c5fb1a 1556 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1557 continue;
1558
c9839a11 1559 return k;
75d262c2
VCG
1560 }
1561
1562 return NULL;
1563}
75d262c2 1564
c9839a11 1565struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1566 u8 addr_type)
75d262c2 1567{
c9839a11 1568 struct smp_ltk *k;
75d262c2 1569
c9839a11
VCG
1570 list_for_each_entry(k, &hdev->long_term_keys, list)
1571 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1572 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1573 return k;
1574
1575 return NULL;
1576}
75d262c2 1577
d25e28ab 1578int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1579 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1580{
1581 struct link_key *key, *old_key;
745c0ce3
VA
1582 u8 old_key_type;
1583 bool persistent;
55ed8ca1
JH
1584
1585 old_key = hci_find_link_key(hdev, bdaddr);
1586 if (old_key) {
1587 old_key_type = old_key->type;
1588 key = old_key;
1589 } else {
12adcf3a 1590 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1591 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1592 if (!key)
1593 return -ENOMEM;
1594 list_add(&key->list, &hdev->link_keys);
1595 }
1596
6ed93dc6 1597 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1598
d25e28ab
JH
1599 /* Some buggy controller combinations generate a changed
1600 * combination key for legacy pairing even when there's no
1601 * previous key */
1602 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1603 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1604 type = HCI_LK_COMBINATION;
655fe6ec
JH
1605 if (conn)
1606 conn->key_type = type;
1607 }
d25e28ab 1608
55ed8ca1 1609 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1610 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1611 key->pin_len = pin_len;
1612
b6020ba0 1613 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1614 key->type = old_key_type;
4748fed2
JH
1615 else
1616 key->type = type;
1617
4df378a1
JH
1618 if (!new_key)
1619 return 0;
1620
1621 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1622
744cf19e 1623 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1624
6ec5bcad
VA
1625 if (conn)
1626 conn->flush_key = !persistent;
55ed8ca1
JH
1627
1628 return 0;
1629}
1630
c9839a11 1631int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1632 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1633 ediv, u8 rand[8])
75d262c2 1634{
c9839a11 1635 struct smp_ltk *key, *old_key;
75d262c2 1636
c9839a11
VCG
1637 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1638 return 0;
75d262c2 1639
c9839a11
VCG
1640 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1641 if (old_key)
75d262c2 1642 key = old_key;
c9839a11
VCG
1643 else {
1644 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1645 if (!key)
1646 return -ENOMEM;
c9839a11 1647 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1648 }
1649
75d262c2 1650 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1651 key->bdaddr_type = addr_type;
1652 memcpy(key->val, tk, sizeof(key->val));
1653 key->authenticated = authenticated;
1654 key->ediv = ediv;
1655 key->enc_size = enc_size;
1656 key->type = type;
1657 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1658
c9839a11
VCG
1659 if (!new_key)
1660 return 0;
75d262c2 1661
261cc5aa
VCG
1662 if (type & HCI_SMP_LTK)
1663 mgmt_new_ltk(hdev, key, 1);
1664
75d262c2
VCG
1665 return 0;
1666}
1667
55ed8ca1
JH
1668int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1669{
1670 struct link_key *key;
1671
1672 key = hci_find_link_key(hdev, bdaddr);
1673 if (!key)
1674 return -ENOENT;
1675
6ed93dc6 1676 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1677
1678 list_del(&key->list);
1679 kfree(key);
1680
1681 return 0;
1682}
1683
b899efaf
VCG
1684int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1685{
1686 struct smp_ltk *k, *tmp;
1687
1688 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1689 if (bacmp(bdaddr, &k->bdaddr))
1690 continue;
1691
6ed93dc6 1692 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1693
1694 list_del(&k->list);
1695 kfree(k);
1696 }
1697
1698 return 0;
1699}
1700
6bd32326 1701/* HCI command timer function */
bda4f23a 1702static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1703{
1704 struct hci_dev *hdev = (void *) arg;
1705
bda4f23a
AE
1706 if (hdev->sent_cmd) {
1707 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1708 u16 opcode = __le16_to_cpu(sent->opcode);
1709
1710 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1711 } else {
1712 BT_ERR("%s command tx timeout", hdev->name);
1713 }
1714
6bd32326 1715 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1716 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1717}
1718
2763eda6 1719struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1720 bdaddr_t *bdaddr)
2763eda6
SJ
1721{
1722 struct oob_data *data;
1723
1724 list_for_each_entry(data, &hdev->remote_oob_data, list)
1725 if (bacmp(bdaddr, &data->bdaddr) == 0)
1726 return data;
1727
1728 return NULL;
1729}
1730
1731int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1732{
1733 struct oob_data *data;
1734
1735 data = hci_find_remote_oob_data(hdev, bdaddr);
1736 if (!data)
1737 return -ENOENT;
1738
6ed93dc6 1739 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1740
1741 list_del(&data->list);
1742 kfree(data);
1743
1744 return 0;
1745}
1746
1747int hci_remote_oob_data_clear(struct hci_dev *hdev)
1748{
1749 struct oob_data *data, *n;
1750
1751 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1752 list_del(&data->list);
1753 kfree(data);
1754 }
1755
1756 return 0;
1757}
1758
1759int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1760 u8 *randomizer)
2763eda6
SJ
1761{
1762 struct oob_data *data;
1763
1764 data = hci_find_remote_oob_data(hdev, bdaddr);
1765
1766 if (!data) {
1767 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1768 if (!data)
1769 return -ENOMEM;
1770
1771 bacpy(&data->bdaddr, bdaddr);
1772 list_add(&data->list, &hdev->remote_oob_data);
1773 }
1774
1775 memcpy(data->hash, hash, sizeof(data->hash));
1776 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1777
6ed93dc6 1778 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1779
1780 return 0;
1781}
1782
04124681 1783struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1784{
8035ded4 1785 struct bdaddr_list *b;
b2a66aad 1786
8035ded4 1787 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1788 if (bacmp(bdaddr, &b->bdaddr) == 0)
1789 return b;
b2a66aad
AJ
1790
1791 return NULL;
1792}
1793
1794int hci_blacklist_clear(struct hci_dev *hdev)
1795{
1796 struct list_head *p, *n;
1797
1798 list_for_each_safe(p, n, &hdev->blacklist) {
1799 struct bdaddr_list *b;
1800
1801 b = list_entry(p, struct bdaddr_list, list);
1802
1803 list_del(p);
1804 kfree(b);
1805 }
1806
1807 return 0;
1808}
1809
88c1fe4b 1810int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1811{
1812 struct bdaddr_list *entry;
b2a66aad
AJ
1813
1814 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1815 return -EBADF;
1816
5e762444
AJ
1817 if (hci_blacklist_lookup(hdev, bdaddr))
1818 return -EEXIST;
b2a66aad
AJ
1819
1820 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1821 if (!entry)
1822 return -ENOMEM;
b2a66aad
AJ
1823
1824 bacpy(&entry->bdaddr, bdaddr);
1825
1826 list_add(&entry->list, &hdev->blacklist);
1827
88c1fe4b 1828 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1829}
1830
88c1fe4b 1831int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1832{
1833 struct bdaddr_list *entry;
b2a66aad 1834
1ec918ce 1835 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1836 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1837
1838 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1839 if (!entry)
5e762444 1840 return -ENOENT;
b2a66aad
AJ
1841
1842 list_del(&entry->list);
1843 kfree(entry);
1844
88c1fe4b 1845 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1846}
1847
42c6b129 1848static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1849{
1850 struct le_scan_params *param = (struct le_scan_params *) opt;
1851 struct hci_cp_le_set_scan_param cp;
1852
1853 memset(&cp, 0, sizeof(cp));
1854 cp.type = param->type;
1855 cp.interval = cpu_to_le16(param->interval);
1856 cp.window = cpu_to_le16(param->window);
1857
42c6b129 1858 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1859}
1860
42c6b129 1861static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1862{
1863 struct hci_cp_le_set_scan_enable cp;
1864
1865 memset(&cp, 0, sizeof(cp));
1866 cp.enable = 1;
0431a43c 1867 cp.filter_dup = 1;
7ba8b4be 1868
42c6b129 1869 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1870}
1871
/* Synchronously configure and start an LE scan, then arm the delayed
 * work that stops it after @timeout ms. Returns 0, -EINPROGRESS when
 * a scan is already running, or the failing request's error code.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* 3s budget for each of the two synchronous HCI requests */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Schedule automatic scan stop */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
1905
7dbfac1d
AG
/* Abort a running LE scan: cancel the pending auto-stop work and, if
 * it had not fired yet, send the disable command ourselves.
 * Returns 0 or -EALREADY when no scan is active.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1923
7ba8b4be
AG
1924static void le_scan_disable_work(struct work_struct *work)
1925{
1926 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1927 le_scan_disable.work);
7ba8b4be
AG
1928 struct hci_cp_le_set_scan_enable cp;
1929
1930 BT_DBG("%s", hdev->name);
1931
1932 memset(&cp, 0, sizeof(cp));
1933
1934 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1935}
1936
28b75a89
AG
1937static void le_scan_work(struct work_struct *work)
1938{
1939 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1940 struct le_scan_params *param = &hdev->le_scan_params;
1941
1942 BT_DBG("%s", hdev->name);
1943
04124681
GP
1944 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1945 param->timeout);
28b75a89
AG
1946}
1947
/* Record LE scan parameters on the device and defer the actual scan to
 * le_scan_work on the long workqueue. Returns 0, -ENOTSUPP in LE
 * peripheral mode, or -EINPROGRESS when a scan job is already queued.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* The scan issues blocking HCI requests, so run it on the
	 * system long workqueue rather than inline */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1970
9be0dab7
DH
/* Alloc HCI device: allocate and fully initialize a struct hci_dev
 * (defaults, locks, lists, work items, queues, command timer, sysfs
 * and discovery state). Returns the new device or NULL on OOM.
 * The caller registers it with hci_register_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff intervals in baseband slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2026
/* Free HCI device: drop the driver-init queue and release the last
 * device reference; the memory itself is freed by the device-model
 * release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2036
1da177e4
LT
/* Register HCI device: assign an index, add it to the global list,
 * create its workqueues, sysfs entries and rfkill switch, and queue
 * the initial power-on. Returns the new index or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must supply both open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2125
/* Unregister HCI device: detach from the global list, power it down,
 * tear down mgmt/rfkill/sysfs/workqueues, purge all stored security
 * data, drop the registration reference and release the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* hdev may be gone after the final hci_dev_put; save the id */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all stored security and filtering state */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2184
/* Suspend HCI device: notify registered listeners; always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2192
/* Resume HCI device: notify registered listeners; always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2200
76bca880
MH
/* Receive frame from HCI drivers: validate that the owning device is
 * up (or initializing), stamp the skb and hand it to the RX worker.
 * Consumes @skb in all cases. Returns 0 or -ENXIO.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2223
33e882a5 2224static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2225 int count, __u8 index)
33e882a5
SS
2226{
2227 int len = 0;
2228 int hlen = 0;
2229 int remain = count;
2230 struct sk_buff *skb;
2231 struct bt_skb_cb *scb;
2232
2233 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2234 index >= NUM_REASSEMBLY)
33e882a5
SS
2235 return -EILSEQ;
2236
2237 skb = hdev->reassembly[index];
2238
2239 if (!skb) {
2240 switch (type) {
2241 case HCI_ACLDATA_PKT:
2242 len = HCI_MAX_FRAME_SIZE;
2243 hlen = HCI_ACL_HDR_SIZE;
2244 break;
2245 case HCI_EVENT_PKT:
2246 len = HCI_MAX_EVENT_SIZE;
2247 hlen = HCI_EVENT_HDR_SIZE;
2248 break;
2249 case HCI_SCODATA_PKT:
2250 len = HCI_MAX_SCO_SIZE;
2251 hlen = HCI_SCO_HDR_SIZE;
2252 break;
2253 }
2254
1e429f38 2255 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2256 if (!skb)
2257 return -ENOMEM;
2258
2259 scb = (void *) skb->cb;
2260 scb->expect = hlen;
2261 scb->pkt_type = type;
2262
2263 skb->dev = (void *) hdev;
2264 hdev->reassembly[index] = skb;
2265 }
2266
2267 while (count) {
2268 scb = (void *) skb->cb;
89bb46d0 2269 len = min_t(uint, scb->expect, count);
33e882a5
SS
2270
2271 memcpy(skb_put(skb, len), data, len);
2272
2273 count -= len;
2274 data += len;
2275 scb->expect -= len;
2276 remain = count;
2277
2278 switch (type) {
2279 case HCI_EVENT_PKT:
2280 if (skb->len == HCI_EVENT_HDR_SIZE) {
2281 struct hci_event_hdr *h = hci_event_hdr(skb);
2282 scb->expect = h->plen;
2283
2284 if (skb_tailroom(skb) < scb->expect) {
2285 kfree_skb(skb);
2286 hdev->reassembly[index] = NULL;
2287 return -ENOMEM;
2288 }
2289 }
2290 break;
2291
2292 case HCI_ACLDATA_PKT:
2293 if (skb->len == HCI_ACL_HDR_SIZE) {
2294 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2295 scb->expect = __le16_to_cpu(h->dlen);
2296
2297 if (skb_tailroom(skb) < scb->expect) {
2298 kfree_skb(skb);
2299 hdev->reassembly[index] = NULL;
2300 return -ENOMEM;
2301 }
2302 }
2303 break;
2304
2305 case HCI_SCODATA_PKT:
2306 if (skb->len == HCI_SCO_HDR_SIZE) {
2307 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2308 scb->expect = h->dlen;
2309
2310 if (skb_tailroom(skb) < scb->expect) {
2311 kfree_skb(skb);
2312 hdev->reassembly[index] = NULL;
2313 return -ENOMEM;
2314 }
2315 }
2316 break;
2317 }
2318
2319 if (scb->expect == 0) {
2320 /* Complete frame */
2321
2322 bt_cb(skb)->pkt_type = type;
2323 hci_recv_frame(skb);
2324
2325 hdev->reassembly[index] = NULL;
2326 return remain;
2327 }
2328 }
2329
2330 return remain;
2331}
2332
/* Feed a fragment of a typed HCI packet into the reassembly machinery.
 *
 * Loops until all of @count bytes are consumed, using the per-type
 * reassembly slot (type - 1). Returns the last remainder from
 * hci_reassembly() (0 when everything was consumed) or a negative
 * errno on failure.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2352
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (packet-type indicator byte followed by the
 * packet, as used by UART-style transports) into the reassembly
 * machinery. A new frame's first byte carries its type; a frame already
 * in progress keeps the type stored in its skb control block.
 * Returns the unconsumed remainder or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2387
1da177e4
LT
2388/* ---- Interface to upper protocols ---- */
2389
/* Register an upper-protocol callback structure on the global HCI
 * callback list. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2401
/* Remove a previously registered callback structure from the global
 * HCI callback list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2413
/* Hand one frame to the transport driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when in promiscuous mode), then passes ownership to the
 * driver's send callback. Returns the driver's result, or -ENODEV if
 * the skb carries no device pointer.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2441
/* Initialize an asynchronous HCI request: empty command queue, bound
 * to @hdev, with no sticky build error recorded yet.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2448
/* Submit a built HCI request for execution.
 *
 * The @complete callback is attached to the last queued command and the
 * whole batch is spliced atomically onto the device command queue.
 * Returns 0 on success, the sticky build error if hci_req_add() failed
 * earlier, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2480
/* Allocate and fill an skb holding one HCI command packet (header plus
 * optional @plen bytes of @param). Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2506
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2530
/* Queue a command to an asynchronous HCI request.
 *
 * On allocation failure the error is also recorded in req->err so that
 * hci_req_run() can discard the whole partially built request.
 */
int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return -ENOMEM;
	}

	/* The first command of a request marks the request boundary for
	 * hci_req_is_complete().
	 */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);

	return 0;
}
2554
1da177e4 2555/* Get data from the previously sent command */
a9de9248 2556void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2557{
2558 struct hci_command_hdr *hdr;
2559
2560 if (!hdev->sent_cmd)
2561 return NULL;
2562
2563 hdr = (void *) hdev->sent_cmd->data;
2564
a9de9248 2565 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2566 return NULL;
2567
f0e09510 2568 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2569
2570 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2571}
2572
/* Send ACL data */
/* Prepend an ACL header (packed handle/flags plus payload length) to
 * the skb and reset its transport header offset.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2585
ee22be7e 2586static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2587 struct sk_buff *skb, __u16 flags)
1da177e4 2588{
ee22be7e 2589 struct hci_conn *conn = chan->conn;
1da177e4
LT
2590 struct hci_dev *hdev = conn->hdev;
2591 struct sk_buff *list;
2592
087bfd99
GP
2593 skb->len = skb_headlen(skb);
2594 skb->data_len = 0;
2595
2596 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2597
2598 switch (hdev->dev_type) {
2599 case HCI_BREDR:
2600 hci_add_acl_hdr(skb, conn->handle, flags);
2601 break;
2602 case HCI_AMP:
2603 hci_add_acl_hdr(skb, chan->handle, flags);
2604 break;
2605 default:
2606 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2607 return;
2608 }
087bfd99 2609
70f23020
AE
2610 list = skb_shinfo(skb)->frag_list;
2611 if (!list) {
1da177e4
LT
2612 /* Non fragmented */
2613 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2614
73d80deb 2615 skb_queue_tail(queue, skb);
1da177e4
LT
2616 } else {
2617 /* Fragmented */
2618 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2619
2620 skb_shinfo(skb)->frag_list = NULL;
2621
2622 /* Queue all fragments atomically */
af3e6359 2623 spin_lock(&queue->lock);
1da177e4 2624
73d80deb 2625 __skb_queue_tail(queue, skb);
e702112f
AE
2626
2627 flags &= ~ACL_START;
2628 flags |= ACL_CONT;
1da177e4
LT
2629 do {
2630 skb = list; list = list->next;
8e87d142 2631
1da177e4 2632 skb->dev = (void *) hdev;
0d48d939 2633 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2634 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2635
2636 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2637
73d80deb 2638 __skb_queue_tail(queue, skb);
1da177e4
LT
2639 } while (list);
2640
af3e6359 2641 spin_unlock(&queue->lock);
1da177e4 2642 }
73d80deb
LAD
2643}
2644
/* Queue ACL data on the channel's data queue and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2657
2658/* Send SCO data */
0d861d8b 2659void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2660{
2661 struct hci_dev *hdev = conn->hdev;
2662 struct hci_sco_hdr hdr;
2663
2664 BT_DBG("%s len %d", hdev->name, skb->len);
2665
aca3192c 2666 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2667 hdr.dlen = skb->len;
2668
badff6d0
ACM
2669 skb_push(skb, HCI_SCO_HDR_SIZE);
2670 skb_reset_transport_header(skb);
9c70220b 2671 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2672
2673 skb->dev = (void *) hdev;
0d48d939 2674 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2675
1da177e4 2676 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2677 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2678}
1da177e4
LT
2679
2680/* ---- HCI TX task (outgoing data) ---- */
2681
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * outstanding packets, and compute its fair-share quota from the
 * controller's free buffer count. Returns NULL (and *quote = 0) when
 * nothing is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Share the available buffers evenly, at least one each */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2742
6039aa73 2743static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2744{
2745 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2746 struct hci_conn *c;
1da177e4 2747
bae1f5d9 2748 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2749
bf4c6325
GP
2750 rcu_read_lock();
2751
1da177e4 2752 /* Kill stalled connections */
bf4c6325 2753 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2754 if (c->type == type && c->sent) {
6ed93dc6
AE
2755 BT_ERR("%s killing stalled connection %pMR",
2756 hdev->name, &c->dst);
bed71748 2757 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2758 }
2759 }
bf4c6325
GP
2760
2761 rcu_read_unlock();
1da177e4
LT
2762}
2763
/* Channel-level scheduler: among all channels on connections of @type,
 * pick one whose head skb has the highest priority, breaking ties by
 * the connection with the fewest outstanding packets. The fair-share
 * quota is derived from the free buffer count of the link type.
 * Returns NULL when no channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2845
/* Anti-starvation pass: after a scheduling round, promote the head skb
 * of every idle channel (nothing sent this round) of @type to just
 * below the maximum priority so it gets serviced soon. Channels that
 * did send have their per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2895
/* Number of controller data blocks consumed by this ACL packet's
 * payload (block-based flow control mode).
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2901
6039aa73 2902static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2903{
1da177e4
LT
2904 if (!test_bit(HCI_RAW, &hdev->flags)) {
2905 /* ACL tx timeout must be longer than maximum
2906 * link supervision timeout (40.9 seconds) */
63d2bc1b 2907 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2908 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2909 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2910 }
63d2bc1b 2911}
1da177e4 2912
6039aa73 2913static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2914{
2915 unsigned int cnt = hdev->acl_cnt;
2916 struct hci_chan *chan;
2917 struct sk_buff *skb;
2918 int quote;
2919
2920 __check_timeout(hdev, cnt);
04837f64 2921
73d80deb 2922 while (hdev->acl_cnt &&
a8c5fb1a 2923 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2924 u32 priority = (skb_peek(&chan->data_q))->priority;
2925 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2926 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2927 skb->len, skb->priority);
73d80deb 2928
ec1cce24
LAD
2929 /* Stop if priority has changed */
2930 if (skb->priority < priority)
2931 break;
2932
2933 skb = skb_dequeue(&chan->data_q);
2934
73d80deb 2935 hci_conn_enter_active_mode(chan->conn,
04124681 2936 bt_cb(skb)->force_active);
04837f64 2937
1da177e4
LT
2938 hci_send_frame(skb);
2939 hdev->acl_last_tx = jiffies;
2940
2941 hdev->acl_cnt--;
73d80deb
LAD
2942 chan->sent++;
2943 chan->conn->sent++;
1da177e4
LT
2944 }
2945 }
02b20f0b
LAD
2946
2947 if (cnt != hdev->acl_cnt)
2948 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2949}
2950
6039aa73 2951static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2952{
63d2bc1b 2953 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2954 struct hci_chan *chan;
2955 struct sk_buff *skb;
2956 int quote;
bd1eb66b 2957 u8 type;
b71d385a 2958
63d2bc1b 2959 __check_timeout(hdev, cnt);
b71d385a 2960
bd1eb66b
AE
2961 BT_DBG("%s", hdev->name);
2962
2963 if (hdev->dev_type == HCI_AMP)
2964 type = AMP_LINK;
2965 else
2966 type = ACL_LINK;
2967
b71d385a 2968 while (hdev->block_cnt > 0 &&
bd1eb66b 2969 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
2970 u32 priority = (skb_peek(&chan->data_q))->priority;
2971 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2972 int blocks;
2973
2974 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2975 skb->len, skb->priority);
b71d385a
AE
2976
2977 /* Stop if priority has changed */
2978 if (skb->priority < priority)
2979 break;
2980
2981 skb = skb_dequeue(&chan->data_q);
2982
2983 blocks = __get_blocks(hdev, skb);
2984 if (blocks > hdev->block_cnt)
2985 return;
2986
2987 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2988 bt_cb(skb)->force_active);
b71d385a
AE
2989
2990 hci_send_frame(skb);
2991 hdev->acl_last_tx = jiffies;
2992
2993 hdev->block_cnt -= blocks;
2994 quote -= blocks;
2995
2996 chan->sent += blocks;
2997 chan->conn->sent += blocks;
2998 }
2999 }
3000
3001 if (cnt != hdev->block_cnt)
bd1eb66b 3002 hci_prio_recalculate(hdev, type);
b71d385a
AE
3003}
3004
6039aa73 3005static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3006{
3007 BT_DBG("%s", hdev->name);
3008
bd1eb66b
AE
3009 /* No ACL link over BR/EDR controller */
3010 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3011 return;
3012
3013 /* No AMP link over AMP controller */
3014 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3015 return;
3016
3017 switch (hdev->flow_ctl_mode) {
3018 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3019 hci_sched_acl_pkt(hdev);
3020 break;
3021
3022 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3023 hci_sched_acl_blk(hdev);
3024 break;
3025 }
3026}
3027
1da177e4 3028/* Schedule SCO */
6039aa73 3029static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3030{
3031 struct hci_conn *conn;
3032 struct sk_buff *skb;
3033 int quote;
3034
3035 BT_DBG("%s", hdev->name);
3036
52087a79
LAD
3037 if (!hci_conn_num(hdev, SCO_LINK))
3038 return;
3039
1da177e4
LT
3040 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3041 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3042 BT_DBG("skb %p len %d", skb, skb->len);
3043 hci_send_frame(skb);
3044
3045 conn->sent++;
3046 if (conn->sent == ~0)
3047 conn->sent = 0;
3048 }
3049 }
3050}
3051
6039aa73 3052static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3053{
3054 struct hci_conn *conn;
3055 struct sk_buff *skb;
3056 int quote;
3057
3058 BT_DBG("%s", hdev->name);
3059
52087a79
LAD
3060 if (!hci_conn_num(hdev, ESCO_LINK))
3061 return;
3062
8fc9ced3
GP
3063 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3064 &quote))) {
b6a0dc82
MH
3065 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3066 BT_DBG("skb %p len %d", skb, skb->len);
3067 hci_send_frame(skb);
3068
3069 conn->sent++;
3070 if (conn->sent == ~0)
3071 conn->sent = 0;
3072 }
3073 }
3074}
3075
6039aa73 3076static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3077{
73d80deb 3078 struct hci_chan *chan;
6ed58ec5 3079 struct sk_buff *skb;
02b20f0b 3080 int quote, cnt, tmp;
6ed58ec5
VT
3081
3082 BT_DBG("%s", hdev->name);
3083
52087a79
LAD
3084 if (!hci_conn_num(hdev, LE_LINK))
3085 return;
3086
6ed58ec5
VT
3087 if (!test_bit(HCI_RAW, &hdev->flags)) {
3088 /* LE tx timeout must be longer than maximum
3089 * link supervision timeout (40.9 seconds) */
bae1f5d9 3090 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3091 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3092 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3093 }
3094
3095 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3096 tmp = cnt;
73d80deb 3097 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3098 u32 priority = (skb_peek(&chan->data_q))->priority;
3099 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3100 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3101 skb->len, skb->priority);
6ed58ec5 3102
ec1cce24
LAD
3103 /* Stop if priority has changed */
3104 if (skb->priority < priority)
3105 break;
3106
3107 skb = skb_dequeue(&chan->data_q);
3108
6ed58ec5
VT
3109 hci_send_frame(skb);
3110 hdev->le_last_tx = jiffies;
3111
3112 cnt--;
73d80deb
LAD
3113 chan->sent++;
3114 chan->conn->sent++;
6ed58ec5
VT
3115 }
3116 }
73d80deb 3117
6ed58ec5
VT
3118 if (hdev->le_pkts)
3119 hdev->le_cnt = cnt;
3120 else
3121 hdev->acl_cnt = cnt;
02b20f0b
LAD
3122
3123 if (cnt != tmp)
3124 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3125}
3126
3eff45ea 3127static void hci_tx_work(struct work_struct *work)
1da177e4 3128{
3eff45ea 3129 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3130 struct sk_buff *skb;
3131
6ed58ec5 3132 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3133 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
3134
3135 /* Schedule queues and send stuff to HCI driver */
3136
3137 hci_sched_acl(hdev);
3138
3139 hci_sched_sco(hdev);
3140
b6a0dc82
MH
3141 hci_sched_esco(hdev);
3142
6ed58ec5
VT
3143 hci_sched_le(hdev);
3144
1da177e4
LT
3145 /* Send next queued raw (unknown type) packet */
3146 while ((skb = skb_dequeue(&hdev->raw_q)))
3147 hci_send_frame(skb);
1da177e4
LT
3148}
3149
25985edc 3150/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3151
3152/* ACL data packet */
6039aa73 3153static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3154{
3155 struct hci_acl_hdr *hdr = (void *) skb->data;
3156 struct hci_conn *conn;
3157 __u16 handle, flags;
3158
3159 skb_pull(skb, HCI_ACL_HDR_SIZE);
3160
3161 handle = __le16_to_cpu(hdr->handle);
3162 flags = hci_flags(handle);
3163 handle = hci_handle(handle);
3164
f0e09510 3165 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3166 handle, flags);
1da177e4
LT
3167
3168 hdev->stat.acl_rx++;
3169
3170 hci_dev_lock(hdev);
3171 conn = hci_conn_hash_lookup_handle(hdev, handle);
3172 hci_dev_unlock(hdev);
8e87d142 3173
1da177e4 3174 if (conn) {
65983fc7 3175 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3176
1da177e4 3177 /* Send to upper protocol */
686ebf28
UF
3178 l2cap_recv_acldata(conn, skb, flags);
3179 return;
1da177e4 3180 } else {
8e87d142 3181 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3182 hdev->name, handle);
1da177e4
LT
3183 }
3184
3185 kfree_skb(skb);
3186}
3187
3188/* SCO data packet */
6039aa73 3189static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3190{
3191 struct hci_sco_hdr *hdr = (void *) skb->data;
3192 struct hci_conn *conn;
3193 __u16 handle;
3194
3195 skb_pull(skb, HCI_SCO_HDR_SIZE);
3196
3197 handle = __le16_to_cpu(hdr->handle);
3198
f0e09510 3199 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3200
3201 hdev->stat.sco_rx++;
3202
3203 hci_dev_lock(hdev);
3204 conn = hci_conn_hash_lookup_handle(hdev, handle);
3205 hci_dev_unlock(hdev);
3206
3207 if (conn) {
1da177e4 3208 /* Send to upper protocol */
686ebf28
UF
3209 sco_recv_scodata(conn, skb);
3210 return;
1da177e4 3211 } else {
8e87d142 3212 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3213 hdev->name, handle);
1da177e4
LT
3214 }
3215
3216 kfree_skb(skb);
3217}
3218
9238f36a
JH
3219static bool hci_req_is_complete(struct hci_dev *hdev)
3220{
3221 struct sk_buff *skb;
3222
3223 skb = skb_peek(&hdev->cmd_q);
3224 if (!skb)
3225 return true;
3226
3227 return bt_cb(skb)->req.start;
3228}
3229
/* Re-queue a clone of the last sent command at the head of the command
 * queue. Used to recover from controllers that emit a spurious reset
 * complete event; HCI_OP_RESET itself is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3251
/* Handle the Command Complete event for @opcode: decide whether the
 * async request it belongs to is finished and, if so, run the request's
 * completion callback and drop any remaining queued commands of that
 * request (on error, the rest of the request is aborted).
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3309
/* Handle the Command Status event for @opcode. A failure status aborts
 * the request via hci_req_cmd_complete(); a success status only
 * completes the request when this was its last command and a complete
 * callback is pending.
 */
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
3335
b78752cc 3336static void hci_rx_work(struct work_struct *work)
1da177e4 3337{
b78752cc 3338 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3339 struct sk_buff *skb;
3340
3341 BT_DBG("%s", hdev->name);
3342
1da177e4 3343 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3344 /* Send copy to monitor */
3345 hci_send_to_monitor(hdev, skb);
3346
1da177e4
LT
3347 if (atomic_read(&hdev->promisc)) {
3348 /* Send copy to the sockets */
470fe1b5 3349 hci_send_to_sock(hdev, skb);
1da177e4
LT
3350 }
3351
3352 if (test_bit(HCI_RAW, &hdev->flags)) {
3353 kfree_skb(skb);
3354 continue;
3355 }
3356
3357 if (test_bit(HCI_INIT, &hdev->flags)) {
3358 /* Don't process data packets in this states. */
0d48d939 3359 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3360 case HCI_ACLDATA_PKT:
3361 case HCI_SCODATA_PKT:
3362 kfree_skb(skb);
3363 continue;
3ff50b79 3364 }
1da177e4
LT
3365 }
3366
3367 /* Process frame */
0d48d939 3368 switch (bt_cb(skb)->pkt_type) {
1da177e4 3369 case HCI_EVENT_PKT:
b78752cc 3370 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3371 hci_event_packet(hdev, skb);
3372 break;
3373
3374 case HCI_ACLDATA_PKT:
3375 BT_DBG("%s ACL data packet", hdev->name);
3376 hci_acldata_packet(hdev, skb);
3377 break;
3378
3379 case HCI_SCODATA_PKT:
3380 BT_DBG("%s SCO data packet", hdev->name);
3381 hci_scodata_packet(hdev, skb);
3382 break;
3383
3384 default:
3385 kfree_skb(skb);
3386 break;
3387 }
3388 }
1da177e4
LT
3389}
3390
c347b765 3391static void hci_cmd_work(struct work_struct *work)
1da177e4 3392{
c347b765 3393 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3394 struct sk_buff *skb;
3395
2104786b
AE
3396 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3397 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3398
1da177e4 3399 /* Send queued commands */
5a08ecce
AE
3400 if (atomic_read(&hdev->cmd_cnt)) {
3401 skb = skb_dequeue(&hdev->cmd_q);
3402 if (!skb)
3403 return;
3404
7585b97a 3405 kfree_skb(hdev->sent_cmd);
1da177e4 3406
70f23020
AE
3407 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3408 if (hdev->sent_cmd) {
1da177e4
LT
3409 atomic_dec(&hdev->cmd_cnt);
3410 hci_send_frame(skb);
7bdb8a5c
SJ
3411 if (test_bit(HCI_RESET, &hdev->flags))
3412 del_timer(&hdev->cmd_timer);
3413 else
3414 mod_timer(&hdev->cmd_timer,
5f246e89 3415 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3416 } else {
3417 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3418 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3419 }
3420 }
3421}
2519a1fc
AG
3422
3423int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3424{
3425 /* General inquiry access code (GIAC) */
3426 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3427 struct hci_cp_inquiry cp;
3428
3429 BT_DBG("%s", hdev->name);
3430
3431 if (test_bit(HCI_INQUIRY, &hdev->flags))
3432 return -EINPROGRESS;
3433
4663262c
JH
3434 inquiry_cache_flush(hdev);
3435
2519a1fc
AG
3436 memset(&cp, 0, sizeof(cp));
3437 memcpy(&cp.lap, lap, sizeof(cp.lap));
3438 cp.length = length;
3439
3440 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3441}
023d5049
AG
3442
3443int hci_cancel_inquiry(struct hci_dev *hdev)
3444{
3445 BT_DBG("%s", hdev->name);
3446
3447 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3448 return -EALREADY;
023d5049
AG
3449
3450 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3451}
31f7956c
AG
3452
3453u8 bdaddr_to_le(u8 bdaddr_type)
3454{
3455 switch (bdaddr_type) {
3456 case BDADDR_LE_PUBLIC:
3457 return ADDR_LE_DEV_PUBLIC;
3458
3459 default:
3460 /* Fallback to LE Random address type */
3461 return ADDR_LE_DEV_RANDOM;
3462 }
3463}