Bluetooth: Disable fast connectable when disabling connectable
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Broadcast an HCI device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the
 * HCI socket layer so listening user-space sockets are informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Completion callback for synchronous requests: record the result and
 * wake the thread sleeping in __hci_req_sync(). Only acts if a request
 * is actually pending, so stray completions are ignored.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Abort a pending synchronous request with the given (positive) errno
 * and wake the waiter; __hci_req_sync() will return -err.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
82/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Builds an HCI request via @func, submits it, and sleeps (interruptibly,
 * up to @timeout jiffies) until hci_req_sync_complete()/hci_req_cancel()
 * flips req_status. Caller must hold the request lock (hci_req_lock).
 *
 * Returns 0 on success, a negative errno on failure or timeout.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Queue ourselves on the wait queue *before* sleeping so the
	 * completion callback cannot wake us up before we are waiting.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		/* Neither completed nor canceled: the wait timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
145
/* Public synchronous-request entry point: takes the per-device request
 * lock around __hci_req_sync() so requests are serialized. Fails fast
 * with -ENETDOWN if the device is not up.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
163
/* Queue an HCI_Reset command. The HCI_RESET flag tells the event path
 * that a reset is in flight (it is cleared when the reset completes).
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
172
/* Stage-1 init for BR/EDR controllers: packet-based flow control plus
 * the basic identity reads (features, version, BD address).
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
186
/* Stage-1 init for AMP controllers: these use block-based flow control
 * and expose AMP-specific info instead of BR/EDR identity data.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
200
/* First init stage: flush any driver-queued "special" commands to the
 * controller, optionally reset it, then run the type-specific stage-1
 * sequence (BR/EDR or AMP).
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands: splice driver-provided command skbs into a
	 * separate request so they are sent before the regular init
	 * commands queued on @req below.
	 */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		/* First command of a request carries the start marker */
		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
245
/* Stage-2 BR/EDR setup: read controller parameters, clear event filters,
 * set the connection-accept timeout and purge stored link keys.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
276
/* Stage-2 LE setup: read the LE-specific controller capabilities. */
static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}
294
295static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
296{
297 if (lmp_ext_inq_capable(hdev))
298 return 0x02;
299
300 if (lmp_inq_rssi_capable(hdev))
301 return 0x01;
302
303 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
304 hdev->lmp_subver == 0x0757)
305 return 0x01;
306
307 if (hdev->manufacturer == 15) {
308 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
309 return 0x01;
310 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
311 return 0x01;
312 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
313 return 0x01;
314 }
315
316 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
317 hdev->lmp_subver == 0x1805)
318 return 0x01;
319
320 return 0x00;
321}
322
/* Program the best inquiry-result mode the controller supports. */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
331
/* Build and send the HCI (and, if applicable, LE) event mask, enabling
 * only the events the controller's feature set can actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE controllers additionally get an LE event mask (reusing the
	 * same buffer, zeroed first).
	 */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
400
/* Second init stage: transport-specific setup (BR/EDR, LE), the event
 * mask, and feature-conditional configuration (SSP/EIR, inquiry mode,
 * extended features, link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* Read Local Supported Commands only exists from 1.2 onwards */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled by policy: clear the EIR data both
			 * locally and in the controller.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
451
/* Write the default link policy derived from the controller's LMP
 * capabilities (role switch, hold, sniff, park).
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
470
/* Sync the host's LE-support setting to the controller, but only send
 * the command when the desired state differs from the current one.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Skip the write if the controller already matches */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
487
/* Third init stage: optional link policy (commands[5] bit 0x10 means
 * Write Default Link Policy is supported) and LE host support plus
 * initial advertising data.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}
}
500
/* Run the full controller bring-up sequence (three synchronous request
 * stages). Returns 0 on success or a negative errno from the first
 * failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
522
/* Request builder: write the scan-enable setting (inquiry/page scan
 * bits packed in @opt).
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
532
/* Request builder: enable/disable authentication (@opt is the flag). */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
542
/* Request builder: enable/disable link-level encryption. */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
552
/* Request builder: write the default link policy (@opt, host order). */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
562
8e87d142 563/* Get HCI device by index.
1da177e4
LT
564 * Device is held on return. */
/* Get HCI device by index.
 * Device is held on return (caller must drop with hci_dev_put);
 * returns NULL if @index is negative or no such device exists.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
584
585/* ---- Inquiry support ---- */
ff9ef578 586
30dc78e1
JH
/* Return true while a discovery procedure is in progress (actively
 * finding devices or resolving remote names).
 */
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
600
ff9ef578
JH
/* Transition the discovery state machine and emit mgmt "discovering"
 * notifications on the transitions user space cares about.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never began, so no
		 * "stopped discovering" event is sent in that case.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
626
1da177e4
LT
/* Free every inquiry-cache entry and reset the secondary lists.
 * Entries live on cache->all and may also sit on ->unknown/->resolve,
 * so those heads are re-initialized rather than walked.
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
640
a8c5fb1a
GP
/* Find an inquiry-cache entry by Bluetooth address in the full ("all")
 * list; returns NULL when no entry matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
656
/* Find an entry whose remote name is still unknown (the "unknown"
 * sub-list) by Bluetooth address; returns NULL if not present.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
672
/* Find an entry on the name-resolve list. With BDADDR_ANY, match the
 * first entry in the given name @state; otherwise match by address.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
691
/* Re-insert @ie into the resolve list keeping it sorted by descending
 * signal strength (strongest RSSI resolved first), while leaving
 * entries already in NAME_PENDING state ahead of it.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		/* Stop at the first non-pending entry that is weaker or
		 * equal; @ie is inserted just before it.
		 */
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
710
/* Insert or refresh an inquiry-cache entry for @data.
 *
 * @name_known: whether the remote name is already known for this result
 * @ssp: out-parameter; set to true if the device advertises SSP support
 *
 * Returns true when the entry's name is known (i.e. no name-resolution
 * round is needed for it), false otherwise or on allocation failure.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while a name lookup is still needed:
		 * re-sort the entry within the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN and drop from the unknown/resolve list,
	 * unless a name request is already in flight (NAME_PENDING).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
768
769static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
770{
30883512 771 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
772 struct inquiry_info *info = (struct inquiry_info *) buf;
773 struct inquiry_entry *e;
774 int copied = 0;
775
561aafbc 776 list_for_each_entry(e, &cache->all, all) {
1da177e4 777 struct inquiry_data *data = &e->data;
b57c1a56
JH
778
779 if (copied >= num)
780 break;
781
1da177e4
LT
782 bacpy(&info->bdaddr, &data->bdaddr);
783 info->pscan_rep_mode = data->pscan_rep_mode;
784 info->pscan_period_mode = data->pscan_period_mode;
785 info->pscan_mode = data->pscan_mode;
786 memcpy(info->dev_class, data->dev_class, 3);
787 info->clock_offset = data->clock_offset;
b57c1a56 788
1da177e4 789 info++;
b57c1a56 790 copied++;
1da177e4
LT
791 }
792
793 BT_DBG("cache %p, copied %d", cache, copied);
794 return copied;
795}
796
/* Request builder: start an inquiry using the LAP/length/num_rsp from
 * the user-supplied hci_inquiry_req passed through @opt. No-op if an
 * inquiry is already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
814
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (when the
 * cache is stale, empty, or the caller asked for a flush), then copy
 * the cached results back to user space.
 *
 * @arg: user pointer to a struct hci_inquiry_req followed by space for
 *       the result array.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28 s units; ~2 s of jiffies per unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
882
3f0f524b
JH
/* Build LE advertising data (flags, TX power, device name) into @ptr.
 * Each element is encoded as <len><type><payload> per the AD format.
 * Returns the total number of bytes written (at most HCI_MAX_AD_LENGTH).
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	/* Flags element is only emitted when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for this element's length/type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncated names use the "shortened name" AD type */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
940
/* Regenerate the LE advertising data and queue an LE Set Advertising
 * Data command — but only if the data actually changed since the last
 * update (the current copy is cached in hdev->adv_data).
 */
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Unchanged advertising data: nothing to send */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
965
1da177e4
LT
966/* ---- HCI ioctl helpers ---- */
967
/* Power up HCI device @dev: open the transport, run the controller init
 * sequence (unless the device is "raw"), and notify the stack and mgmt
 * layer. On init failure all work/queues are flushed and the transport
 * is closed again. Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Refuse to power up while rfkill-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the controller init sequence entirely */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report powered-on to mgmt while still in setup */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1052
/* Power down @hdev: cancel deferred work, flush workqueues, tear down
 * discovery/connection state, optionally reset the controller, drain
 * all queues and close the transport. The ordering here is deliberate —
 * work is flushed before queues are purged, and the command timer is
 * stopped before the last sent command is freed.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just make sure the command timer is stopped */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Report powered-off to mgmt unless this was an auto-off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
1146
1147int hci_dev_close(__u16 dev)
1148{
1149 struct hci_dev *hdev;
1150 int err;
1151
70f23020
AE
1152 hdev = hci_dev_get(dev);
1153 if (!hdev)
1da177e4 1154 return -ENODEV;
8ee56540
MH
1155
1156 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1157 cancel_delayed_work(&hdev->power_off);
1158
1da177e4 1159 err = hci_dev_do_close(hdev);
8ee56540 1160
1da177e4
LT
1161 hci_dev_put(hdev);
1162 return err;
1163}
1164
/* ioctl: soft-reset device @dev. Drops queues and caches and, unless the
 * device is in raw mode, sends an HCI Reset to the controller. A device
 * that is not up is silently left alone (returns 0). */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the command/data flow-control counters to idle state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1202
1203int hci_dev_reset_stat(__u16 dev)
1204{
1205 struct hci_dev *hdev;
1206 int ret = 0;
1207
70f23020
AE
1208 hdev = hci_dev_get(dev);
1209 if (!hdev)
1da177e4
LT
1210 return -ENODEV;
1211
1212 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1213
1214 hci_dev_put(hdev);
1215
1216 return ret;
1217}
1218
/* Dispatcher for the legacy HCISET* ioctls. Copies the request from
 * userspace, resolves the device and applies the setting, either by
 * issuing a synchronous HCI request or by updating hdev fields directly. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries two packed __u16 values:
		 * element [1] is the MTU, element [0] the packet count */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1293
1294int hci_get_dev_list(void __user *arg)
1295{
8035ded4 1296 struct hci_dev *hdev;
1da177e4
LT
1297 struct hci_dev_list_req *dl;
1298 struct hci_dev_req *dr;
1da177e4
LT
1299 int n = 0, size, err;
1300 __u16 dev_num;
1301
1302 if (get_user(dev_num, (__u16 __user *) arg))
1303 return -EFAULT;
1304
1305 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1306 return -EINVAL;
1307
1308 size = sizeof(*dl) + dev_num * sizeof(*dr);
1309
70f23020
AE
1310 dl = kzalloc(size, GFP_KERNEL);
1311 if (!dl)
1da177e4
LT
1312 return -ENOMEM;
1313
1314 dr = dl->dev_req;
1315
f20d09d5 1316 read_lock(&hci_dev_list_lock);
8035ded4 1317 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1318 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1319 cancel_delayed_work(&hdev->power_off);
c542a06c 1320
a8b2d5c2
JH
1321 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1322 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1323
1da177e4
LT
1324 (dr + n)->dev_id = hdev->id;
1325 (dr + n)->dev_opt = hdev->flags;
c542a06c 1326
1da177e4
LT
1327 if (++n >= dev_num)
1328 break;
1329 }
f20d09d5 1330 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1331
1332 dl->dev_num = n;
1333 size = sizeof(*dl) + n * sizeof(*dr);
1334
1335 err = copy_to_user(arg, dl, size);
1336 kfree(dl);
1337
1338 return err ? -EFAULT : 0;
1339}
1340
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the requested
 * device and copy it back to userspace. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device keeps it up: drop a pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffer info in the ACL
		 * fields, no SCO support */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1389
1390/* ---- Interface to HCI drivers ---- */
1391
611b30f7
MH
1392static int hci_rfkill_set_block(void *data, bool blocked)
1393{
1394 struct hci_dev *hdev = data;
1395
1396 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1397
1398 if (!blocked)
1399 return 0;
1400
1401 hci_dev_do_close(hdev);
1402
1403 return 0;
1404}
1405
1406static const struct rfkill_ops hci_rfkill_ops = {
1407 .set_block = hci_rfkill_set_block,
1408};
1409
/* Deferred power-on work, queued from hci_register_dev() and mgmt.
 * Opens the device; on an auto-off device, also schedules the delayed
 * power-off and completes the SETUP phase towards mgmt. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Power back off after a timeout unless something keeps it up */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1426
/* Delayed power-off work: simply takes the device down. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1436
/* Delayed work fired when the discoverable timeout expires: turn off
 * inquiry scan (keep page scan, so the device stays connectable). */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1454
2aeb9a1a
JH
1455int hci_uuids_clear(struct hci_dev *hdev)
1456{
4821002c 1457 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1458
4821002c
JH
1459 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1460 list_del(&uuid->list);
2aeb9a1a
JH
1461 kfree(uuid);
1462 }
1463
1464 return 0;
1465}
1466
55ed8ca1
JH
1467int hci_link_keys_clear(struct hci_dev *hdev)
1468{
1469 struct list_head *p, *n;
1470
1471 list_for_each_safe(p, n, &hdev->link_keys) {
1472 struct link_key *key;
1473
1474 key = list_entry(p, struct link_key, list);
1475
1476 list_del(p);
1477 kfree(key);
1478 }
1479
1480 return 0;
1481}
1482
b899efaf
VCG
1483int hci_smp_ltks_clear(struct hci_dev *hdev)
1484{
1485 struct smp_ltk *k, *tmp;
1486
1487 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1488 list_del(&k->list);
1489 kfree(k);
1490 }
1491
1492 return 0;
1493}
1494
55ed8ca1
JH
1495struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1496{
8035ded4 1497 struct link_key *k;
55ed8ca1 1498
8035ded4 1499 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1500 if (bacmp(bdaddr, &k->bdaddr) == 0)
1501 return k;
55ed8ca1
JH
1502
1503 return NULL;
1504}
1505
745c0ce3 1506static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1507 u8 key_type, u8 old_key_type)
d25e28ab
JH
1508{
1509 /* Legacy key */
1510 if (key_type < 0x03)
745c0ce3 1511 return true;
d25e28ab
JH
1512
1513 /* Debug keys are insecure so don't store them persistently */
1514 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1515 return false;
d25e28ab
JH
1516
1517 /* Changed combination key and there's no previous one */
1518 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1519 return false;
d25e28ab
JH
1520
1521 /* Security mode 3 case */
1522 if (!conn)
745c0ce3 1523 return true;
d25e28ab
JH
1524
1525 /* Neither local nor remote side had no-bonding as requirement */
1526 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1527 return true;
d25e28ab
JH
1528
1529 /* Local side had dedicated bonding as requirement */
1530 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1531 return true;
d25e28ab
JH
1532
1533 /* Remote side had dedicated bonding as requirement */
1534 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1535 return true;
d25e28ab
JH
1536
1537 /* If none of the above criteria match, then don't store the key
1538 * persistently */
745c0ce3 1539 return false;
d25e28ab
JH
1540}
1541
c9839a11 1542struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1543{
c9839a11 1544 struct smp_ltk *k;
75d262c2 1545
c9839a11
VCG
1546 list_for_each_entry(k, &hdev->long_term_keys, list) {
1547 if (k->ediv != ediv ||
a8c5fb1a 1548 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1549 continue;
1550
c9839a11 1551 return k;
75d262c2
VCG
1552 }
1553
1554 return NULL;
1555}
75d262c2 1556
c9839a11 1557struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1558 u8 addr_type)
75d262c2 1559{
c9839a11 1560 struct smp_ltk *k;
75d262c2 1561
c9839a11
VCG
1562 list_for_each_entry(k, &hdev->long_term_keys, list)
1563 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1564 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1565 return k;
1566
1567 return NULL;
1568}
75d262c2 1569
d25e28ab 1570int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1571 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1572{
1573 struct link_key *key, *old_key;
745c0ce3
VA
1574 u8 old_key_type;
1575 bool persistent;
55ed8ca1
JH
1576
1577 old_key = hci_find_link_key(hdev, bdaddr);
1578 if (old_key) {
1579 old_key_type = old_key->type;
1580 key = old_key;
1581 } else {
12adcf3a 1582 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1583 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1584 if (!key)
1585 return -ENOMEM;
1586 list_add(&key->list, &hdev->link_keys);
1587 }
1588
6ed93dc6 1589 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1590
d25e28ab
JH
1591 /* Some buggy controller combinations generate a changed
1592 * combination key for legacy pairing even when there's no
1593 * previous key */
1594 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1595 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1596 type = HCI_LK_COMBINATION;
655fe6ec
JH
1597 if (conn)
1598 conn->key_type = type;
1599 }
d25e28ab 1600
55ed8ca1 1601 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1602 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1603 key->pin_len = pin_len;
1604
b6020ba0 1605 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1606 key->type = old_key_type;
4748fed2
JH
1607 else
1608 key->type = type;
1609
4df378a1
JH
1610 if (!new_key)
1611 return 0;
1612
1613 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1614
744cf19e 1615 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1616
6ec5bcad
VA
1617 if (conn)
1618 conn->flush_key = !persistent;
55ed8ca1
JH
1619
1620 return 0;
1621}
1622
c9839a11 1623int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1624 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1625 ediv, u8 rand[8])
75d262c2 1626{
c9839a11 1627 struct smp_ltk *key, *old_key;
75d262c2 1628
c9839a11
VCG
1629 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1630 return 0;
75d262c2 1631
c9839a11
VCG
1632 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1633 if (old_key)
75d262c2 1634 key = old_key;
c9839a11
VCG
1635 else {
1636 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1637 if (!key)
1638 return -ENOMEM;
c9839a11 1639 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1640 }
1641
75d262c2 1642 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1643 key->bdaddr_type = addr_type;
1644 memcpy(key->val, tk, sizeof(key->val));
1645 key->authenticated = authenticated;
1646 key->ediv = ediv;
1647 key->enc_size = enc_size;
1648 key->type = type;
1649 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1650
c9839a11
VCG
1651 if (!new_key)
1652 return 0;
75d262c2 1653
261cc5aa
VCG
1654 if (type & HCI_SMP_LTK)
1655 mgmt_new_ltk(hdev, key, 1);
1656
75d262c2
VCG
1657 return 0;
1658}
1659
55ed8ca1
JH
1660int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1661{
1662 struct link_key *key;
1663
1664 key = hci_find_link_key(hdev, bdaddr);
1665 if (!key)
1666 return -ENOENT;
1667
6ed93dc6 1668 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1669
1670 list_del(&key->list);
1671 kfree(key);
1672
1673 return 0;
1674}
1675
b899efaf
VCG
1676int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1677{
1678 struct smp_ltk *k, *tmp;
1679
1680 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1681 if (bacmp(bdaddr, &k->bdaddr))
1682 continue;
1683
6ed93dc6 1684 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1685
1686 list_del(&k->list);
1687 kfree(k);
1688 }
1689
1690 return 0;
1691}
1692
6bd32326 1693/* HCI command timer function */
bda4f23a 1694static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1695{
1696 struct hci_dev *hdev = (void *) arg;
1697
bda4f23a
AE
1698 if (hdev->sent_cmd) {
1699 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1700 u16 opcode = __le16_to_cpu(sent->opcode);
1701
1702 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1703 } else {
1704 BT_ERR("%s command tx timeout", hdev->name);
1705 }
1706
6bd32326 1707 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1708 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1709}
1710
2763eda6 1711struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1712 bdaddr_t *bdaddr)
2763eda6
SJ
1713{
1714 struct oob_data *data;
1715
1716 list_for_each_entry(data, &hdev->remote_oob_data, list)
1717 if (bacmp(bdaddr, &data->bdaddr) == 0)
1718 return data;
1719
1720 return NULL;
1721}
1722
1723int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1724{
1725 struct oob_data *data;
1726
1727 data = hci_find_remote_oob_data(hdev, bdaddr);
1728 if (!data)
1729 return -ENOENT;
1730
6ed93dc6 1731 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1732
1733 list_del(&data->list);
1734 kfree(data);
1735
1736 return 0;
1737}
1738
1739int hci_remote_oob_data_clear(struct hci_dev *hdev)
1740{
1741 struct oob_data *data, *n;
1742
1743 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1744 list_del(&data->list);
1745 kfree(data);
1746 }
1747
1748 return 0;
1749}
1750
/* Store (or refresh) the remote OOB hash/randomizer for @bdaddr.
 * Returns 0 or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* No entry yet - allocate one; every field is written
		 * below, so plain kmalloc is sufficient */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1774
04124681 1775struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1776{
8035ded4 1777 struct bdaddr_list *b;
b2a66aad 1778
8035ded4 1779 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1780 if (bacmp(bdaddr, &b->bdaddr) == 0)
1781 return b;
b2a66aad
AJ
1782
1783 return NULL;
1784}
1785
1786int hci_blacklist_clear(struct hci_dev *hdev)
1787{
1788 struct list_head *p, *n;
1789
1790 list_for_each_safe(p, n, &hdev->blacklist) {
1791 struct bdaddr_list *b;
1792
1793 b = list_entry(p, struct bdaddr_list, list);
1794
1795 list_del(p);
1796 kfree(b);
1797 }
1798
1799 return 0;
1800}
1801
88c1fe4b 1802int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1803{
1804 struct bdaddr_list *entry;
b2a66aad
AJ
1805
1806 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1807 return -EBADF;
1808
5e762444
AJ
1809 if (hci_blacklist_lookup(hdev, bdaddr))
1810 return -EEXIST;
b2a66aad
AJ
1811
1812 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1813 if (!entry)
1814 return -ENOMEM;
b2a66aad
AJ
1815
1816 bacpy(&entry->bdaddr, bdaddr);
1817
1818 list_add(&entry->list, &hdev->blacklist);
1819
88c1fe4b 1820 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1821}
1822
88c1fe4b 1823int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1824{
1825 struct bdaddr_list *entry;
b2a66aad 1826
1ec918ce 1827 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1828 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1829
1830 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1831 if (!entry)
5e762444 1832 return -ENOENT;
b2a66aad
AJ
1833
1834 list_del(&entry->list);
1835 kfree(entry);
1836
88c1fe4b 1837 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1838}
1839
42c6b129 1840static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1841{
1842 struct le_scan_params *param = (struct le_scan_params *) opt;
1843 struct hci_cp_le_set_scan_param cp;
1844
1845 memset(&cp, 0, sizeof(cp));
1846 cp.type = param->type;
1847 cp.interval = cpu_to_le16(param->interval);
1848 cp.window = cpu_to_le16(param->window);
1849
42c6b129 1850 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1851}
1852
42c6b129 1853static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1854{
1855 struct hci_cp_le_set_scan_enable cp;
1856
1857 memset(&cp, 0, sizeof(cp));
1858 cp.enable = 1;
0431a43c 1859 cp.filter_dup = 1;
7ba8b4be 1860
42c6b129 1861 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1862}
1863
/* Start an LE scan synchronously: set the scan parameters, enable
 * scanning and schedule the delayed work that will stop it after
 * @timeout ms. Returns -EINPROGRESS when a scan is already running. */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Per-command timeout for the two synchronous requests below */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scan is running; arrange for it to be switched off again */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
1897
/* Cancel a running LE scan. Returns -EALREADY when no scan is active.
 * The disable command is only sent if we managed to cancel the pending
 * le_scan_disable work (otherwise that work will send it itself). */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1915
/* Delayed work that stops a timed LE scan: send LE Set Scan Enable
 * with all-zero parameters (enable = 0). */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1928
/* Worker behind hci_le_scan(): runs the actual (blocking) scan start
 * using the parameters stashed in hdev->le_scan_params. */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1939
/* Kick off an asynchronous LE scan: store the parameters on hdev and
 * queue le_scan_work. Returns -ENOTSUPP in LE peripheral mode and
 * -EINPROGRESS when a scan work item is already pending/running. */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the scan start blocks on HCI requests */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1962
/* Alloc HCI device: allocate and initialize a struct hci_dev with
 * default parameters, lists, work items, queues and the command timer.
 * Returns NULL on allocation failure. The caller registers it with
 * hci_register_dev() and releases it with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2018
/* Free HCI device: drop any driver-queued init packets and release the
 * embedded device reference; the struct itself is freed by the device
 * release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2028
/* Register HCI device: assign an index, create workqueues and sysfs
 * entries, hook up rfkill and schedule the initial power-on.
 * Returns the new device id, or a negative errno. */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	/* Separate queue for blocking request work (power on/off etc.) */
	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2117
/* Unregister HCI device: tear everything down in the reverse order of
 * hci_register_dev() - unlink, close, notify mgmt, remove rfkill and
 * sysfs, free stored keys/lists and finally release the id. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Keep a copy: hdev may be gone by the time we free the id */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2176
/* Suspend HCI device: notify registered listeners; always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2184
/* Resume HCI device: notify registered listeners; always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2192
/* Receive frame from HCI drivers: tag and timestamp the skb, queue it
 * on the RX queue and schedule the RX work. Frames for a device that
 * is neither up nor initializing are dropped with -ENXIO. The skb is
 * always consumed. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2215
33e882a5 2216static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2217 int count, __u8 index)
33e882a5
SS
2218{
2219 int len = 0;
2220 int hlen = 0;
2221 int remain = count;
2222 struct sk_buff *skb;
2223 struct bt_skb_cb *scb;
2224
2225 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2226 index >= NUM_REASSEMBLY)
33e882a5
SS
2227 return -EILSEQ;
2228
2229 skb = hdev->reassembly[index];
2230
2231 if (!skb) {
2232 switch (type) {
2233 case HCI_ACLDATA_PKT:
2234 len = HCI_MAX_FRAME_SIZE;
2235 hlen = HCI_ACL_HDR_SIZE;
2236 break;
2237 case HCI_EVENT_PKT:
2238 len = HCI_MAX_EVENT_SIZE;
2239 hlen = HCI_EVENT_HDR_SIZE;
2240 break;
2241 case HCI_SCODATA_PKT:
2242 len = HCI_MAX_SCO_SIZE;
2243 hlen = HCI_SCO_HDR_SIZE;
2244 break;
2245 }
2246
1e429f38 2247 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2248 if (!skb)
2249 return -ENOMEM;
2250
2251 scb = (void *) skb->cb;
2252 scb->expect = hlen;
2253 scb->pkt_type = type;
2254
2255 skb->dev = (void *) hdev;
2256 hdev->reassembly[index] = skb;
2257 }
2258
2259 while (count) {
2260 scb = (void *) skb->cb;
89bb46d0 2261 len = min_t(uint, scb->expect, count);
33e882a5
SS
2262
2263 memcpy(skb_put(skb, len), data, len);
2264
2265 count -= len;
2266 data += len;
2267 scb->expect -= len;
2268 remain = count;
2269
2270 switch (type) {
2271 case HCI_EVENT_PKT:
2272 if (skb->len == HCI_EVENT_HDR_SIZE) {
2273 struct hci_event_hdr *h = hci_event_hdr(skb);
2274 scb->expect = h->plen;
2275
2276 if (skb_tailroom(skb) < scb->expect) {
2277 kfree_skb(skb);
2278 hdev->reassembly[index] = NULL;
2279 return -ENOMEM;
2280 }
2281 }
2282 break;
2283
2284 case HCI_ACLDATA_PKT:
2285 if (skb->len == HCI_ACL_HDR_SIZE) {
2286 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2287 scb->expect = __le16_to_cpu(h->dlen);
2288
2289 if (skb_tailroom(skb) < scb->expect) {
2290 kfree_skb(skb);
2291 hdev->reassembly[index] = NULL;
2292 return -ENOMEM;
2293 }
2294 }
2295 break;
2296
2297 case HCI_SCODATA_PKT:
2298 if (skb->len == HCI_SCO_HDR_SIZE) {
2299 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2300 scb->expect = h->dlen;
2301
2302 if (skb_tailroom(skb) < scb->expect) {
2303 kfree_skb(skb);
2304 hdev->reassembly[index] = NULL;
2305 return -ENOMEM;
2306 }
2307 }
2308 break;
2309 }
2310
2311 if (scb->expect == 0) {
2312 /* Complete frame */
2313
2314 bt_cb(skb)->pkt_type = type;
2315 hci_recv_frame(skb);
2316
2317 hdev->reassembly[index] = NULL;
2318 return remain;
2319 }
2320 }
2321
2322 return remain;
2323}
2324
/* Feed a driver-provided fragment of a typed HCI packet into the
 * per-type reassembly slot.  Loops until all input is consumed or an
 * error is returned by hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		/* Slot index is derived from the packet type (type - 1) */
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past the consumed bytes and retry with the rest */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2344
/* Dedicated reassembly slot for untyped byte streams (e.g. UART
 * transports) where the packet type indicator is in-band.
 */
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: first byte is the packet type */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continuation: reuse the type recorded at start */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2379
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure with the HCI core.
 * The cb is added to the global hci_cb_list under the writer lock.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2393
/* Remove a previously registered upper-protocol callback structure. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2405
/* Push one outgoing frame to the driver.  Also mirrors a copy to the
 * monitor channel and, in promiscuous mode, to raw HCI sockets.
 * Consumes the skb in all paths (freed on error, handed to the driver
 * otherwise).
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2433
/* Initialize an asynchronous HCI request: empty command queue, bound
 * to hdev, with no accumulated build error.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2440
/* Submit a built request: splice its queued commands onto hdev->cmd_q
 * and kick the command work.  The completion callback is attached to
 * the last command of the request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The complete callback rides on the final command's control block */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Atomically append the whole request to the device command queue */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2472
/* Allocate and fill an skb carrying one HCI command (header + optional
 * parameters).  Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	/* Opcode goes out little-endian per the HCI wire format */
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2498
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flaged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2522
71c76a17 2523/* Queue a command to an asynchronous HCI request */
e348fe6b 2524void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
71c76a17
JH
2525{
2526 struct hci_dev *hdev = req->hdev;
2527 struct sk_buff *skb;
2528
2529 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2530
34739c1e
AG
2531 /* If an error occured during request building, there is no point in
2532 * queueing the HCI command. We can simply return.
2533 */
2534 if (req->err)
2535 return;
2536
71c76a17
JH
2537 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2538 if (!skb) {
5d73e034
AG
2539 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2540 hdev->name, opcode);
2541 req->err = -ENOMEM;
e348fe6b 2542 return;
71c76a17
JH
2543 }
2544
2545 if (skb_queue_empty(&req->cmd_q))
2546 bt_cb(skb)->req.start = true;
2547
2548 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
2549}
2550
1da177e4 2551/* Get data from the previously sent command */
a9de9248 2552void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2553{
2554 struct hci_command_hdr *hdr;
2555
2556 if (!hdev->sent_cmd)
2557 return NULL;
2558
2559 hdr = (void *) hdev->sent_cmd->data;
2560
a9de9248 2561 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2562 return NULL;
2563
f0e09510 2564 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2565
2566 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2567}
2568
2569/* Send ACL data */
2570static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2571{
2572 struct hci_acl_hdr *hdr;
2573 int len = skb->len;
2574
badff6d0
ACM
2575 skb_push(skb, HCI_ACL_HDR_SIZE);
2576 skb_reset_transport_header(skb);
9c70220b 2577 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2578 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2579 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2580}
2581
ee22be7e 2582static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2583 struct sk_buff *skb, __u16 flags)
1da177e4 2584{
ee22be7e 2585 struct hci_conn *conn = chan->conn;
1da177e4
LT
2586 struct hci_dev *hdev = conn->hdev;
2587 struct sk_buff *list;
2588
087bfd99
GP
2589 skb->len = skb_headlen(skb);
2590 skb->data_len = 0;
2591
2592 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2593
2594 switch (hdev->dev_type) {
2595 case HCI_BREDR:
2596 hci_add_acl_hdr(skb, conn->handle, flags);
2597 break;
2598 case HCI_AMP:
2599 hci_add_acl_hdr(skb, chan->handle, flags);
2600 break;
2601 default:
2602 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2603 return;
2604 }
087bfd99 2605
70f23020
AE
2606 list = skb_shinfo(skb)->frag_list;
2607 if (!list) {
1da177e4
LT
2608 /* Non fragmented */
2609 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2610
73d80deb 2611 skb_queue_tail(queue, skb);
1da177e4
LT
2612 } else {
2613 /* Fragmented */
2614 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2615
2616 skb_shinfo(skb)->frag_list = NULL;
2617
2618 /* Queue all fragments atomically */
af3e6359 2619 spin_lock(&queue->lock);
1da177e4 2620
73d80deb 2621 __skb_queue_tail(queue, skb);
e702112f
AE
2622
2623 flags &= ~ACL_START;
2624 flags |= ACL_CONT;
1da177e4
LT
2625 do {
2626 skb = list; list = list->next;
8e87d142 2627
1da177e4 2628 skb->dev = (void *) hdev;
0d48d939 2629 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2630 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2631
2632 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2633
73d80deb 2634 __skb_queue_tail(queue, skb);
1da177e4
LT
2635 } while (list);
2636
af3e6359 2637 spin_unlock(&queue->lock);
1da177e4 2638 }
73d80deb
LAD
2639}
/* Public entry for sending ACL data on a channel: queue the skb and
 * schedule the TX work item.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2653
2654/* Send SCO data */
0d861d8b 2655void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2656{
2657 struct hci_dev *hdev = conn->hdev;
2658 struct hci_sco_hdr hdr;
2659
2660 BT_DBG("%s len %d", hdev->name, skb->len);
2661
aca3192c 2662 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2663 hdr.dlen = skb->len;
2664
badff6d0
ACM
2665 skb_push(skb, HCI_SCO_HDR_SIZE);
2666 skb_reset_transport_header(skb);
9c70220b 2667 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2668
2669 skb->dev = (void *) hdev;
0d48d939 2670 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2671
1da177e4 2672 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2673 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2674}
1da177e4
LT
2675
2676/* ---- HCI TX task (outgoing data) ---- */
2677
2678/* HCI Connection scheduler */
6039aa73
GP
2679static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2680 int *quote)
1da177e4
LT
2681{
2682 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2683 struct hci_conn *conn = NULL, *c;
abc5de8f 2684 unsigned int num = 0, min = ~0;
1da177e4 2685
8e87d142 2686 /* We don't have to lock device here. Connections are always
1da177e4 2687 * added and removed with TX task disabled. */
bf4c6325
GP
2688
2689 rcu_read_lock();
2690
2691 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2692 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2693 continue;
769be974
MH
2694
2695 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2696 continue;
2697
1da177e4
LT
2698 num++;
2699
2700 if (c->sent < min) {
2701 min = c->sent;
2702 conn = c;
2703 }
52087a79
LAD
2704
2705 if (hci_conn_num(hdev, type) == num)
2706 break;
1da177e4
LT
2707 }
2708
bf4c6325
GP
2709 rcu_read_unlock();
2710
1da177e4 2711 if (conn) {
6ed58ec5
VT
2712 int cnt, q;
2713
2714 switch (conn->type) {
2715 case ACL_LINK:
2716 cnt = hdev->acl_cnt;
2717 break;
2718 case SCO_LINK:
2719 case ESCO_LINK:
2720 cnt = hdev->sco_cnt;
2721 break;
2722 case LE_LINK:
2723 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2724 break;
2725 default:
2726 cnt = 0;
2727 BT_ERR("Unknown link type");
2728 }
2729
2730 q = cnt / num;
1da177e4
LT
2731 *quote = q ? q : 1;
2732 } else
2733 *quote = 0;
2734
2735 BT_DBG("conn %p quote %d", conn, *quote);
2736 return conn;
2737}
2738
6039aa73 2739static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2740{
2741 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2742 struct hci_conn *c;
1da177e4 2743
bae1f5d9 2744 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2745
bf4c6325
GP
2746 rcu_read_lock();
2747
1da177e4 2748 /* Kill stalled connections */
bf4c6325 2749 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2750 if (c->type == type && c->sent) {
6ed93dc6
AE
2751 BT_ERR("%s killing stalled connection %pMR",
2752 hdev->name, &c->dst);
bed71748 2753 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2754 }
2755 }
bf4c6325
GP
2756
2757 rcu_read_unlock();
1da177e4
LT
2758}
2759
/* Channel-level scheduler: among all channels of the given link type
 * with queued data, prefer the highest skb priority, breaking ties by
 * fewest packets in flight on the owning connection; compute a fair
 * quote from the matching buffer-credit pool.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority seen: restart fairness tracking */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on least-sent owning connection */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the credit pool for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2841
/* Anti-starvation pass: after a scheduling round, promote the head skb
 * of every idle channel (nothing sent this round) to just below the
 * maximum priority, so lower-priority traffic eventually gets served.
 * Also resets per-round chan->sent counters.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served this round: just reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2891
b71d385a
AE
2892static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2893{
2894 /* Calculate count of blocks used by this packet */
2895 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2896}
2897
6039aa73 2898static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2899{
1da177e4
LT
2900 if (!test_bit(HCI_RAW, &hdev->flags)) {
2901 /* ACL tx timeout must be longer than maximum
2902 * link supervision timeout (40.9 seconds) */
63d2bc1b 2903 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2904 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2905 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2906 }
63d2bc1b 2907}
1da177e4 2908
6039aa73 2909static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2910{
2911 unsigned int cnt = hdev->acl_cnt;
2912 struct hci_chan *chan;
2913 struct sk_buff *skb;
2914 int quote;
2915
2916 __check_timeout(hdev, cnt);
04837f64 2917
73d80deb 2918 while (hdev->acl_cnt &&
a8c5fb1a 2919 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2920 u32 priority = (skb_peek(&chan->data_q))->priority;
2921 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2922 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2923 skb->len, skb->priority);
73d80deb 2924
ec1cce24
LAD
2925 /* Stop if priority has changed */
2926 if (skb->priority < priority)
2927 break;
2928
2929 skb = skb_dequeue(&chan->data_q);
2930
73d80deb 2931 hci_conn_enter_active_mode(chan->conn,
04124681 2932 bt_cb(skb)->force_active);
04837f64 2933
1da177e4
LT
2934 hci_send_frame(skb);
2935 hdev->acl_last_tx = jiffies;
2936
2937 hdev->acl_cnt--;
73d80deb
LAD
2938 chan->sent++;
2939 chan->conn->sent++;
1da177e4
LT
2940 }
2941 }
02b20f0b
LAD
2942
2943 if (cnt != hdev->acl_cnt)
2944 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2945}
2946
6039aa73 2947static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2948{
63d2bc1b 2949 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2950 struct hci_chan *chan;
2951 struct sk_buff *skb;
2952 int quote;
bd1eb66b 2953 u8 type;
b71d385a 2954
63d2bc1b 2955 __check_timeout(hdev, cnt);
b71d385a 2956
bd1eb66b
AE
2957 BT_DBG("%s", hdev->name);
2958
2959 if (hdev->dev_type == HCI_AMP)
2960 type = AMP_LINK;
2961 else
2962 type = ACL_LINK;
2963
b71d385a 2964 while (hdev->block_cnt > 0 &&
bd1eb66b 2965 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
2966 u32 priority = (skb_peek(&chan->data_q))->priority;
2967 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2968 int blocks;
2969
2970 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2971 skb->len, skb->priority);
b71d385a
AE
2972
2973 /* Stop if priority has changed */
2974 if (skb->priority < priority)
2975 break;
2976
2977 skb = skb_dequeue(&chan->data_q);
2978
2979 blocks = __get_blocks(hdev, skb);
2980 if (blocks > hdev->block_cnt)
2981 return;
2982
2983 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2984 bt_cb(skb)->force_active);
b71d385a
AE
2985
2986 hci_send_frame(skb);
2987 hdev->acl_last_tx = jiffies;
2988
2989 hdev->block_cnt -= blocks;
2990 quote -= blocks;
2991
2992 chan->sent += blocks;
2993 chan->conn->sent += blocks;
2994 }
2995 }
2996
2997 if (cnt != hdev->block_cnt)
bd1eb66b 2998 hci_prio_recalculate(hdev, type);
b71d385a
AE
2999}
3000
6039aa73 3001static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3002{
3003 BT_DBG("%s", hdev->name);
3004
bd1eb66b
AE
3005 /* No ACL link over BR/EDR controller */
3006 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3007 return;
3008
3009 /* No AMP link over AMP controller */
3010 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3011 return;
3012
3013 switch (hdev->flow_ctl_mode) {
3014 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3015 hci_sched_acl_pkt(hdev);
3016 break;
3017
3018 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3019 hci_sched_acl_blk(hdev);
3020 break;
3021 }
3022}
3023
1da177e4 3024/* Schedule SCO */
6039aa73 3025static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3026{
3027 struct hci_conn *conn;
3028 struct sk_buff *skb;
3029 int quote;
3030
3031 BT_DBG("%s", hdev->name);
3032
52087a79
LAD
3033 if (!hci_conn_num(hdev, SCO_LINK))
3034 return;
3035
1da177e4
LT
3036 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3037 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3038 BT_DBG("skb %p len %d", skb, skb->len);
3039 hci_send_frame(skb);
3040
3041 conn->sent++;
3042 if (conn->sent == ~0)
3043 conn->sent = 0;
3044 }
3045 }
3046}
3047
6039aa73 3048static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3049{
3050 struct hci_conn *conn;
3051 struct sk_buff *skb;
3052 int quote;
3053
3054 BT_DBG("%s", hdev->name);
3055
52087a79
LAD
3056 if (!hci_conn_num(hdev, ESCO_LINK))
3057 return;
3058
8fc9ced3
GP
3059 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3060 &quote))) {
b6a0dc82
MH
3061 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3062 BT_DBG("skb %p len %d", skb, skb->len);
3063 hci_send_frame(skb);
3064
3065 conn->sent++;
3066 if (conn->sent == ~0)
3067 conn->sent = 0;
3068 }
3069 }
3070}
3071
6039aa73 3072static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3073{
73d80deb 3074 struct hci_chan *chan;
6ed58ec5 3075 struct sk_buff *skb;
02b20f0b 3076 int quote, cnt, tmp;
6ed58ec5
VT
3077
3078 BT_DBG("%s", hdev->name);
3079
52087a79
LAD
3080 if (!hci_conn_num(hdev, LE_LINK))
3081 return;
3082
6ed58ec5
VT
3083 if (!test_bit(HCI_RAW, &hdev->flags)) {
3084 /* LE tx timeout must be longer than maximum
3085 * link supervision timeout (40.9 seconds) */
bae1f5d9 3086 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3087 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3088 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3089 }
3090
3091 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3092 tmp = cnt;
73d80deb 3093 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3094 u32 priority = (skb_peek(&chan->data_q))->priority;
3095 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3096 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3097 skb->len, skb->priority);
6ed58ec5 3098
ec1cce24
LAD
3099 /* Stop if priority has changed */
3100 if (skb->priority < priority)
3101 break;
3102
3103 skb = skb_dequeue(&chan->data_q);
3104
6ed58ec5
VT
3105 hci_send_frame(skb);
3106 hdev->le_last_tx = jiffies;
3107
3108 cnt--;
73d80deb
LAD
3109 chan->sent++;
3110 chan->conn->sent++;
6ed58ec5
VT
3111 }
3112 }
73d80deb 3113
6ed58ec5
VT
3114 if (hdev->le_pkts)
3115 hdev->le_cnt = cnt;
3116 else
3117 hdev->acl_cnt = cnt;
02b20f0b
LAD
3118
3119 if (cnt != tmp)
3120 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3121}
3122
3eff45ea 3123static void hci_tx_work(struct work_struct *work)
1da177e4 3124{
3eff45ea 3125 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3126 struct sk_buff *skb;
3127
6ed58ec5 3128 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3129 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
3130
3131 /* Schedule queues and send stuff to HCI driver */
3132
3133 hci_sched_acl(hdev);
3134
3135 hci_sched_sco(hdev);
3136
b6a0dc82
MH
3137 hci_sched_esco(hdev);
3138
6ed58ec5
VT
3139 hci_sched_le(hdev);
3140
1da177e4
LT
3141 /* Send next queued raw (unknown type) packet */
3142 while ((skb = skb_dequeue(&hdev->raw_q)))
3143 hci_send_frame(skb);
1da177e4
LT
3144}
3145
25985edc 3146/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3147
3148/* ACL data packet */
6039aa73 3149static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3150{
3151 struct hci_acl_hdr *hdr = (void *) skb->data;
3152 struct hci_conn *conn;
3153 __u16 handle, flags;
3154
3155 skb_pull(skb, HCI_ACL_HDR_SIZE);
3156
3157 handle = __le16_to_cpu(hdr->handle);
3158 flags = hci_flags(handle);
3159 handle = hci_handle(handle);
3160
f0e09510 3161 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3162 handle, flags);
1da177e4
LT
3163
3164 hdev->stat.acl_rx++;
3165
3166 hci_dev_lock(hdev);
3167 conn = hci_conn_hash_lookup_handle(hdev, handle);
3168 hci_dev_unlock(hdev);
8e87d142 3169
1da177e4 3170 if (conn) {
65983fc7 3171 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3172
1da177e4 3173 /* Send to upper protocol */
686ebf28
UF
3174 l2cap_recv_acldata(conn, skb, flags);
3175 return;
1da177e4 3176 } else {
8e87d142 3177 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3178 hdev->name, handle);
1da177e4
LT
3179 }
3180
3181 kfree_skb(skb);
3182}
3183
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol; sco takes ownership of skb */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3214
9238f36a
JH
3215static bool hci_req_is_complete(struct hci_dev *hdev)
3216{
3217 struct sk_buff *skb;
3218
3219 skb = skb_peek(&hdev->cmd_q);
3220 if (!skb)
3221 return true;
3222
3223 return bt_cb(skb)->req.start;
3224}
3225
/* Requeue a clone of the last sent command at the head of the command
 * queue.  Used to recover from spontaneous controller resets; a sent
 * HCI_OP_RESET itself is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3247
/* Handle a Command Complete for an async request: find and invoke the
 * request's completion callback, and on failure flush the remaining
 * commands of the aborted request from the command queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the NEXT request */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3305
/* Handle a Command Status event for an async request.  Failures are
 * treated exactly like a failed Command Complete; success only matters
 * when it concludes the whole request.
 */
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
3331
b78752cc 3332static void hci_rx_work(struct work_struct *work)
1da177e4 3333{
b78752cc 3334 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3335 struct sk_buff *skb;
3336
3337 BT_DBG("%s", hdev->name);
3338
1da177e4 3339 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3340 /* Send copy to monitor */
3341 hci_send_to_monitor(hdev, skb);
3342
1da177e4
LT
3343 if (atomic_read(&hdev->promisc)) {
3344 /* Send copy to the sockets */
470fe1b5 3345 hci_send_to_sock(hdev, skb);
1da177e4
LT
3346 }
3347
3348 if (test_bit(HCI_RAW, &hdev->flags)) {
3349 kfree_skb(skb);
3350 continue;
3351 }
3352
3353 if (test_bit(HCI_INIT, &hdev->flags)) {
3354 /* Don't process data packets in this states. */
0d48d939 3355 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3356 case HCI_ACLDATA_PKT:
3357 case HCI_SCODATA_PKT:
3358 kfree_skb(skb);
3359 continue;
3ff50b79 3360 }
1da177e4
LT
3361 }
3362
3363 /* Process frame */
0d48d939 3364 switch (bt_cb(skb)->pkt_type) {
1da177e4 3365 case HCI_EVENT_PKT:
b78752cc 3366 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3367 hci_event_packet(hdev, skb);
3368 break;
3369
3370 case HCI_ACLDATA_PKT:
3371 BT_DBG("%s ACL data packet", hdev->name);
3372 hci_acldata_packet(hdev, skb);
3373 break;
3374
3375 case HCI_SCODATA_PKT:
3376 BT_DBG("%s SCO data packet", hdev->name);
3377 hci_scodata_packet(hdev, skb);
3378 break;
3379
3380 default:
3381 kfree_skb(skb);
3382 break;
3383 }
3384 }
1da177e4
LT
3385}
3386
c347b765 3387static void hci_cmd_work(struct work_struct *work)
1da177e4 3388{
c347b765 3389 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3390 struct sk_buff *skb;
3391
2104786b
AE
3392 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3393 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3394
1da177e4 3395 /* Send queued commands */
5a08ecce
AE
3396 if (atomic_read(&hdev->cmd_cnt)) {
3397 skb = skb_dequeue(&hdev->cmd_q);
3398 if (!skb)
3399 return;
3400
7585b97a 3401 kfree_skb(hdev->sent_cmd);
1da177e4 3402
70f23020
AE
3403 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3404 if (hdev->sent_cmd) {
1da177e4
LT
3405 atomic_dec(&hdev->cmd_cnt);
3406 hci_send_frame(skb);
7bdb8a5c
SJ
3407 if (test_bit(HCI_RESET, &hdev->flags))
3408 del_timer(&hdev->cmd_timer);
3409 else
3410 mod_timer(&hdev->cmd_timer,
5f246e89 3411 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3412 } else {
3413 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3414 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3415 }
3416 }
3417}
2519a1fc
AG
3418
3419int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3420{
3421 /* General inquiry access code (GIAC) */
3422 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3423 struct hci_cp_inquiry cp;
3424
3425 BT_DBG("%s", hdev->name);
3426
3427 if (test_bit(HCI_INQUIRY, &hdev->flags))
3428 return -EINPROGRESS;
3429
4663262c
JH
3430 inquiry_cache_flush(hdev);
3431
2519a1fc
AG
3432 memset(&cp, 0, sizeof(cp));
3433 memcpy(&cp.lap, lap, sizeof(cp.lap));
3434 cp.length = length;
3435
3436 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3437}
023d5049
AG
3438
3439int hci_cancel_inquiry(struct hci_dev *hdev)
3440{
3441 BT_DBG("%s", hdev->name);
3442
3443 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3444 return -EALREADY;
023d5049
AG
3445
3446 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3447}
31f7956c
AG
3448
3449u8 bdaddr_to_le(u8 bdaddr_type)
3450{
3451 switch (bdaddr_type) {
3452 case BDADDR_LE_PUBLIC:
3453 return ADDR_LE_DEV_PUBLIC;
3454
3455 default:
3456 /* Fallback to LE Random address type */
3457 return ADDR_LE_DEV_RANDOM;
3458 }
3459}