Bluetooth: Fix waiting for EIR update when setting local name
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Broadcast a device event (HCI_DEV_UP/DOWN/...) to HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Completion callback for synchronous requests: record the result and
 * wake the thread sleeping in __hci_req_sync().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	/* Only complete a request that is still pending; a late event must
	 * not clobber the state of a finished or cancelled request.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Abort a pending synchronous request with @err (e.g. on device close)
 * and wake the waiter in __hci_req_sync().
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
82/* Execute request and wait for completion. */
01178cd4 83static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
84 void (*func)(struct hci_request *req,
85 unsigned long opt),
01178cd4 86 unsigned long opt, __u32 timeout)
1da177e4 87{
42c6b129 88 struct hci_request req;
1da177e4
LT
89 DECLARE_WAITQUEUE(wait, current);
90 int err = 0;
91
92 BT_DBG("%s start", hdev->name);
93
42c6b129
JH
94 hci_req_init(&req, hdev);
95
1da177e4
LT
96 hdev->req_status = HCI_REQ_PEND;
97
42c6b129 98 func(&req, opt);
53cce22d 99
42c6b129
JH
100 err = hci_req_run(&req, hci_req_sync_complete);
101 if (err < 0) {
53cce22d 102 hdev->req_status = 0;
920c8300
AG
103
104 /* ENODATA means the HCI request command queue is empty.
105 * This can happen when a request with conditionals doesn't
106 * trigger any commands to be sent. This is normal behavior
107 * and should not trigger an error return.
42c6b129 108 */
920c8300
AG
109 if (err == -ENODATA)
110 return 0;
111
112 return err;
53cce22d
JH
113 }
114
bc4445c7
AG
115 add_wait_queue(&hdev->req_wait_q, &wait);
116 set_current_state(TASK_INTERRUPTIBLE);
117
1da177e4
LT
118 schedule_timeout(timeout);
119
120 remove_wait_queue(&hdev->req_wait_q, &wait);
121
122 if (signal_pending(current))
123 return -EINTR;
124
125 switch (hdev->req_status) {
126 case HCI_REQ_DONE:
e175072f 127 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
128 break;
129
130 case HCI_REQ_CANCELED:
131 err = -hdev->req_result;
132 break;
133
134 default:
135 err = -ETIMEDOUT;
136 break;
3ff50b79 137 }
1da177e4 138
a5040efa 139 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
140
141 BT_DBG("%s end: err %d", hdev->name, err);
142
143 return err;
144}
145
/* Public wrapper around __hci_req_sync(): refuses requests while the
 * device is down and serializes concurrent callers via req_lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
163
/* Request builder: queue an HCI_Reset command.  HCI_RESET is set so the
 * event path knows a reset is in flight.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
172
/* Stage-1 init for BR/EDR controllers: packet-based flow control and the
 * basic identity reads (features, version, address).
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
186
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific info reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
200
/* First init stage: flush any driver-provided setup commands, optionally
 * reset the controller, then run the type-specific (BR/EDR vs AMP)
 * stage-1 sequence.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Driver commands run as a separate request so they complete
	 * before the regular init commands below.
	 */
	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		/* The first command of a request must carry the start marker */
		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
245
/* Stage-2 BR/EDR setup: read controller parameters, clear event filters,
 * set the connection accept timeout and drop all stored link keys.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
276
/* Stage-2 LE setup: read the LE controller capabilities. */
static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}
294
295static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
296{
297 if (lmp_ext_inq_capable(hdev))
298 return 0x02;
299
300 if (lmp_inq_rssi_capable(hdev))
301 return 0x01;
302
303 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
304 hdev->lmp_subver == 0x0757)
305 return 0x01;
306
307 if (hdev->manufacturer == 15) {
308 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
309 return 0x01;
310 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
311 return 0x01;
312 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
313 return 0x01;
314 }
315
316 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
317 hdev->lmp_subver == 0x1805)
318 return 0x01;
319
320 return 0x00;
321}
322
/* Program the controller with the best inquiry mode it supports. */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
331
/* Build and send the HCI (and, if applicable, LE) event mask, enabling
 * only events the controller's feature set can actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE controllers get a separate LE event mask; events[] is reused
	 * for it after the classic mask has been queued.
	 */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
400
/* Second init stage: capability-dependent setup (BR/EDR, LE, event mask,
 * SSP/EIR, inquiry mode, extended features, link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear the cached and remote-visible
			 * EIR data.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
451
42c6b129 452static void hci_setup_link_policy(struct hci_request *req)
2177bab5 453{
42c6b129 454 struct hci_dev *hdev = req->hdev;
2177bab5
JH
455 struct hci_cp_write_def_link_policy cp;
456 u16 link_policy = 0;
457
458 if (lmp_rswitch_capable(hdev))
459 link_policy |= HCI_LP_RSWITCH;
460 if (lmp_hold_capable(hdev))
461 link_policy |= HCI_LP_HOLD;
462 if (lmp_sniff_capable(hdev))
463 link_policy |= HCI_LP_SNIFF;
464 if (lmp_park_capable(hdev))
465 link_policy |= HCI_LP_PARK;
466
467 cp.policy = cpu_to_le16(link_policy);
42c6b129 468 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
469}
470
/* Sync the controller's LE host support setting with the HCI_LE_ENABLED
 * flag; the command is only sent when the setting would actually change.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Skip the write if the controller already matches */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
487
/* Third init stage: default link policy (if the controller supports the
 * command — bit 4 of command byte 5) and LE host support.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);
}
498
/* Run the full controller init sequence (stages 1-3 as synchronous
 * requests).  Returns 0 on success or a negative errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
520
/* Request builder: write the scan enable setting (opt carries the
 * inquiry/page scan bits).
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
530
/* Request builder: write the authentication enable setting. */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
540
/* Request builder: write the encryption mode setting. */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
550
/* Request builder: write the default link policy (opt is the host-order
 * policy bitmask, converted to little endian here).
 */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
560
8e87d142 561/* Get HCI device by index.
1da177e4
LT
562 * Device is held on return. */
563struct hci_dev *hci_dev_get(int index)
564{
8035ded4 565 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
566
567 BT_DBG("%d", index);
568
569 if (index < 0)
570 return NULL;
571
572 read_lock(&hci_dev_list_lock);
8035ded4 573 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
574 if (d->id == index) {
575 hdev = hci_dev_hold(d);
576 break;
577 }
578 }
579 read_unlock(&hci_dev_list_lock);
580 return hdev;
581}
1da177e4
LT
582
583/* ---- Inquiry support ---- */
ff9ef578 584
30dc78e1
JH
585bool hci_discovery_active(struct hci_dev *hdev)
586{
587 struct discovery_state *discov = &hdev->discovery;
588
6fbe195d 589 switch (discov->state) {
343f935b 590 case DISCOVERY_FINDING:
6fbe195d 591 case DISCOVERY_RESOLVING:
30dc78e1
JH
592 return true;
593
6fbe195d
AG
594 default:
595 return false;
596 }
30dc78e1
JH
597}
598
ff9ef578
JH
/* Move the discovery state machine to @state and emit the corresponding
 * mgmt "discovering" events on the STOPPED/FINDING transitions.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never began, so no
		 * "stopped discovering" event is sent.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
624
1da177e4
LT
/* Free every inquiry cache entry and reset the unknown/resolve sublists.
 * Caller must hold hdev->lock.
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	/* The "all" list owns the entries; freeing them also invalidates
	 * their membership in the unknown/resolve lists, which are simply
	 * re-initialized below.
	 */
	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
638
a8c5fb1a
GP
639struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
640 bdaddr_t *bdaddr)
1da177e4 641{
30883512 642 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
643 struct inquiry_entry *e;
644
6ed93dc6 645 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 646
561aafbc
JH
647 list_for_each_entry(e, &cache->all, all) {
648 if (!bacmp(&e->data.bdaddr, bdaddr))
649 return e;
650 }
651
652 return NULL;
653}
654
655struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 656 bdaddr_t *bdaddr)
561aafbc 657{
30883512 658 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
659 struct inquiry_entry *e;
660
6ed93dc6 661 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
662
663 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 664 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
665 return e;
666 }
667
668 return NULL;
1da177e4
LT
669}
670
/* Find an entry on the name-resolve list.  With @bdaddr == BDADDR_ANY the
 * first entry whose name_state matches @state is returned; otherwise the
 * entry with the matching address.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
689
/* Re-insert @ie into the resolve list keeping it sorted by descending
 * RSSI magnitude, but never ahead of entries whose name resolution is
 * already pending.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until the first non-pending entry with weaker signal;
	 * pos ends up pointing at the node to insert after.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
708
/* Insert or refresh an inquiry result in the discovery cache.
 *
 * @name_known: caller already knows the remote name (no resolve needed).
 * @ssp: out-parameter, set when the device supports Simple Pairing.
 *
 * Returns true if the remote name needs no (further) resolving, false if
 * the entry sits on the "unknown" list awaiting a name request.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Preserve SSP knowledge from the cached entry even if this
		 * particular result did not carry it.
		 */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while waiting to resolve: re-sort the
		 * resolve list so stronger devices are handled first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the unknown/resolve
	 * sublist it was queued on (unless a request is already pending).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
766
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries copied.
 * Caller must hold hdev->lock and provide a large enough buffer.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
794
/* Request builder for hci_inquiry(): start an inquiry with the lap/length/
 * num_rsp parameters passed via opt, unless one is already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
812
/* HCIINQUIRY ioctl: run (or reuse a recent) inquiry and copy the cached
 * results back to userspace after the struct hci_inquiry_req header.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Re-inquire only when the cache is stale, empty, or the caller
	 * explicitly asked for a flush.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; wait ~2s per unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
880
3f0f524b
JH
/* Build LE advertising data into @ptr (flags, TX power, device name).
 * Returns the number of bytes written.  @ptr must hold at least
 * HCI_MAX_AD_LENGTH bytes; the name is truncated (EIR_NAME_SHORT) when
 * it does not fit in the remaining space.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		/* AD structure: length, type, payload */
		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Two bytes reserved for the length and type fields */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
938
/* Rebuild the LE advertising data and push it to the controller if it
 * changed.  Returns 0 on success (including the no-change case) or a
 * negative errno.
 */
int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Nothing to do when the data is unchanged */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
973
1da177e4
LT
974/* ---- HCI ioctl helpers ---- */
975
/* HCIDEVUP: power up and initialize device @dev.  Runs the driver open
 * callback, the HCI init sequence (unless the device is raw), and then
 * announces the device; on init failure everything is torn down again.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the HCI init sequence entirely */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		/* mgmt_powered() is deferred during setup; the setup
		 * completion path reports power state instead.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1061
/* Power down a device: cancel pending work, flush queues, optionally
 * reset the controller, call the driver close callback and clear state.
 * The teardown order matters — works are flushed before queues are
 * purged, and the reset runs before the command work is flushed.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* AUTO_OFF set means mgmt already considers the device off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1155
1156int hci_dev_close(__u16 dev)
1157{
1158 struct hci_dev *hdev;
1159 int err;
1160
70f23020
AE
1161 hdev = hci_dev_get(dev);
1162 if (!hdev)
1da177e4 1163 return -ENODEV;
8ee56540
MH
1164
1165 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1166 cancel_delayed_work(&hdev->power_off);
1167
1da177e4 1168 err = hci_dev_do_close(hdev);
8ee56540 1169
1da177e4
LT
1170 hci_dev_put(hdev);
1171 return err;
1172}
1173
/* Ioctl-level reset of adapter @dev: drop pending traffic, flush the
 * inquiry cache and connection hash, reset flow-control counters and,
 * unless the device is in raw mode, issue an HCI Reset command.
 * Returns 0 on success (including when the device is not up). */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one command slot and zero the per-link-type credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1211
1212int hci_dev_reset_stat(__u16 dev)
1213{
1214 struct hci_dev *hdev;
1215 int ret = 0;
1216
70f23020
AE
1217 hdev = hci_dev_get(dev);
1218 if (!hdev)
1da177e4
LT
1219 return -ENODEV;
1220
1221 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1222
1223 hci_dev_put(hdev);
1224
1225 return ret;
1226}
1227
/* Dispatcher for the per-device HCI ioctls (HCISETAUTH, HCISETSCAN, ...).
 * @arg points to a struct hci_dev_req in user space; dev_opt carries the
 * ioctl-specific parameter. Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are user-settable */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: high half = MTU,
		 * low half = packet count */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU, for SCO links */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1302
/* HCIGETDEVLIST ioctl: copy up to dev_num (id, flags) pairs for the
 * registered adapters back to user space. The first __u16 at @arg is
 * the caller-supplied capacity; on return dl->dev_num holds the count
 * actually written. Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kzalloc below stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy (non-mgmt) users take over power management:
		 * cancel auto-off and default to pairable */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy back the entries actually filled in */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1349
1350int hci_get_dev_info(void __user *arg)
1351{
1352 struct hci_dev *hdev;
1353 struct hci_dev_info di;
1354 int err = 0;
1355
1356 if (copy_from_user(&di, arg, sizeof(di)))
1357 return -EFAULT;
1358
70f23020
AE
1359 hdev = hci_dev_get(di.dev_id);
1360 if (!hdev)
1da177e4
LT
1361 return -ENODEV;
1362
a8b2d5c2 1363 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1364 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1365
a8b2d5c2
JH
1366 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1367 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1368
1da177e4
LT
1369 strcpy(di.name, hdev->name);
1370 di.bdaddr = hdev->bdaddr;
943da25d 1371 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1372 di.flags = hdev->flags;
1373 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1374 if (lmp_bredr_capable(hdev)) {
1375 di.acl_mtu = hdev->acl_mtu;
1376 di.acl_pkts = hdev->acl_pkts;
1377 di.sco_mtu = hdev->sco_mtu;
1378 di.sco_pkts = hdev->sco_pkts;
1379 } else {
1380 di.acl_mtu = hdev->le_mtu;
1381 di.acl_pkts = hdev->le_pkts;
1382 di.sco_mtu = 0;
1383 di.sco_pkts = 0;
1384 }
1da177e4
LT
1385 di.link_policy = hdev->link_policy;
1386 di.link_mode = hdev->link_mode;
1387
1388 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1389 memcpy(&di.features, &hdev->features, sizeof(di.features));
1390
1391 if (copy_to_user(arg, &di, sizeof(di)))
1392 err = -EFAULT;
1393
1394 hci_dev_put(hdev);
1395
1396 return err;
1397}
1398
1399/* ---- Interface to HCI drivers ---- */
1400
611b30f7
MH
1401static int hci_rfkill_set_block(void *data, bool blocked)
1402{
1403 struct hci_dev *hdev = data;
1404
1405 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1406
1407 if (!blocked)
1408 return 0;
1409
1410 hci_dev_do_close(hdev);
1411
1412 return 0;
1413}
1414
/* rfkill operations for HCI adapters; only block/unblock is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1418
/* Deferred power-on worker (hdev->power_on): open the device, arm the
 * auto-power-off timer when HCI_AUTO_OFF is set, and announce a newly
 * set-up controller to the management interface. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Power back off automatically unless someone claims the device
	 * before HCI_AUTO_OFF_TIMEOUT expires */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1435
/* Delayed power-off worker (hdev->power_off): simply closes the device */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1445
/* Delayed worker (hdev->discov_off): end the timed discoverable period
 * by restoring page-scan-only mode and clearing the timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* SCAN_PAGE alone disables inquiry scan, i.e. discoverability */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1463
2aeb9a1a
JH
1464int hci_uuids_clear(struct hci_dev *hdev)
1465{
4821002c 1466 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1467
4821002c
JH
1468 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1469 list_del(&uuid->list);
2aeb9a1a
JH
1470 kfree(uuid);
1471 }
1472
1473 return 0;
1474}
1475
55ed8ca1
JH
1476int hci_link_keys_clear(struct hci_dev *hdev)
1477{
1478 struct list_head *p, *n;
1479
1480 list_for_each_safe(p, n, &hdev->link_keys) {
1481 struct link_key *key;
1482
1483 key = list_entry(p, struct link_key, list);
1484
1485 list_del(p);
1486 kfree(key);
1487 }
1488
1489 return 0;
1490}
1491
b899efaf
VCG
1492int hci_smp_ltks_clear(struct hci_dev *hdev)
1493{
1494 struct smp_ltk *k, *tmp;
1495
1496 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1497 list_del(&k->list);
1498 kfree(k);
1499 }
1500
1501 return 0;
1502}
1503
55ed8ca1
JH
1504struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1505{
8035ded4 1506 struct link_key *k;
55ed8ca1 1507
8035ded4 1508 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1509 if (bacmp(bdaddr, &k->bdaddr) == 0)
1510 return k;
55ed8ca1
JH
1511
1512 return NULL;
1513}
1514
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and both sides' authentication requirements.
 * @old_key_type is 0xff when no previous key existed. */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1550
c9839a11 1551struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1552{
c9839a11 1553 struct smp_ltk *k;
75d262c2 1554
c9839a11
VCG
1555 list_for_each_entry(k, &hdev->long_term_keys, list) {
1556 if (k->ediv != ediv ||
a8c5fb1a 1557 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1558 continue;
1559
c9839a11 1560 return k;
75d262c2
VCG
1561 }
1562
1563 return NULL;
1564}
75d262c2 1565
c9839a11 1566struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1567 u8 addr_type)
75d262c2 1568{
c9839a11 1569 struct smp_ltk *k;
75d262c2 1570
c9839a11
VCG
1571 list_for_each_entry(k, &hdev->long_term_keys, list)
1572 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1573 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1574 return k;
1575
1576 return NULL;
1577}
75d262c2 1578
/* Store (or update) a BR/EDR link key for @bdaddr. When @new_key is set,
 * decide persistence via hci_persistent_key(), notify mgmt and mark the
 * connection's key for flushing if it must not be kept.
 * Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Update in place, remembering the previous type */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff signals "no previous key" to hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1631
/* Store (or update) an SMP long term key for @bdaddr/@addr_type.
 * Only STK and LTK types are accepted; when @new_key is set and the key
 * is an LTK, mgmt is notified. Returns 0 or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;	/* update in place */
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are short-lived and not reported to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1668
55ed8ca1
JH
1669int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1670{
1671 struct link_key *key;
1672
1673 key = hci_find_link_key(hdev, bdaddr);
1674 if (!key)
1675 return -ENOENT;
1676
6ed93dc6 1677 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1678
1679 list_del(&key->list);
1680 kfree(key);
1681
1682 return 0;
1683}
1684
b899efaf
VCG
1685int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1686{
1687 struct smp_ltk *k, *tmp;
1688
1689 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1690 if (bacmp(bdaddr, &k->bdaddr))
1691 continue;
1692
6ed93dc6 1693 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1694
1695 list_del(&k->list);
1696 kfree(k);
1697 }
1698
1699 return 0;
1700}
1701
/* HCI command timer function */
/* Fires when the controller failed to answer the last HCI command in
 * time. Logs the stalled opcode (if still known), restores one command
 * credit and kicks the command work so the queue is not stuck forever. */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1719
2763eda6 1720struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1721 bdaddr_t *bdaddr)
2763eda6
SJ
1722{
1723 struct oob_data *data;
1724
1725 list_for_each_entry(data, &hdev->remote_oob_data, list)
1726 if (bacmp(bdaddr, &data->bdaddr) == 0)
1727 return data;
1728
1729 return NULL;
1730}
1731
1732int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1733{
1734 struct oob_data *data;
1735
1736 data = hci_find_remote_oob_data(hdev, bdaddr);
1737 if (!data)
1738 return -ENOENT;
1739
6ed93dc6 1740 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1741
1742 list_del(&data->list);
1743 kfree(data);
1744
1745 return 0;
1746}
1747
1748int hci_remote_oob_data_clear(struct hci_dev *hdev)
1749{
1750 struct oob_data *data, *n;
1751
1752 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1753 list_del(&data->list);
1754 kfree(data);
1755 }
1756
1757 return 0;
1758}
1759
1760int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1761 u8 *randomizer)
2763eda6
SJ
1762{
1763 struct oob_data *data;
1764
1765 data = hci_find_remote_oob_data(hdev, bdaddr);
1766
1767 if (!data) {
1768 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1769 if (!data)
1770 return -ENOMEM;
1771
1772 bacpy(&data->bdaddr, bdaddr);
1773 list_add(&data->list, &hdev->remote_oob_data);
1774 }
1775
1776 memcpy(data->hash, hash, sizeof(data->hash));
1777 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1778
6ed93dc6 1779 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1780
1781 return 0;
1782}
1783
04124681 1784struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1785{
8035ded4 1786 struct bdaddr_list *b;
b2a66aad 1787
8035ded4 1788 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1789 if (bacmp(bdaddr, &b->bdaddr) == 0)
1790 return b;
b2a66aad
AJ
1791
1792 return NULL;
1793}
1794
1795int hci_blacklist_clear(struct hci_dev *hdev)
1796{
1797 struct list_head *p, *n;
1798
1799 list_for_each_safe(p, n, &hdev->blacklist) {
1800 struct bdaddr_list *b;
1801
1802 b = list_entry(p, struct bdaddr_list, list);
1803
1804 list_del(p);
1805 kfree(b);
1806 }
1807
1808 return 0;
1809}
1810
88c1fe4b 1811int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1812{
1813 struct bdaddr_list *entry;
b2a66aad
AJ
1814
1815 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1816 return -EBADF;
1817
5e762444
AJ
1818 if (hci_blacklist_lookup(hdev, bdaddr))
1819 return -EEXIST;
b2a66aad
AJ
1820
1821 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1822 if (!entry)
1823 return -ENOMEM;
b2a66aad
AJ
1824
1825 bacpy(&entry->bdaddr, bdaddr);
1826
1827 list_add(&entry->list, &hdev->blacklist);
1828
88c1fe4b 1829 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1830}
1831
88c1fe4b 1832int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1833{
1834 struct bdaddr_list *entry;
b2a66aad 1835
1ec918ce 1836 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1837 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1838
1839 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1840 if (!entry)
5e762444 1841 return -ENOENT;
b2a66aad
AJ
1842
1843 list_del(&entry->list);
1844 kfree(entry);
1845
88c1fe4b 1846 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1847}
1848
42c6b129 1849static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1850{
1851 struct le_scan_params *param = (struct le_scan_params *) opt;
1852 struct hci_cp_le_set_scan_param cp;
1853
1854 memset(&cp, 0, sizeof(cp));
1855 cp.type = param->type;
1856 cp.interval = cpu_to_le16(param->interval);
1857 cp.window = cpu_to_le16(param->window);
1858
42c6b129 1859 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1860}
1861
42c6b129 1862static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1863{
1864 struct hci_cp_le_set_scan_enable cp;
1865
1866 memset(&cp, 0, sizeof(cp));
1867 cp.enable = 1;
0431a43c 1868 cp.filter_dup = 1;
7ba8b4be 1869
42c6b129 1870 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1871}
1872
/* Synchronously program scan parameters and start an LE scan, then
 * schedule le_scan_disable to stop it after @timeout ms.
 * Returns 0, -EINPROGRESS when a scan is already active, or the error
 * from the synchronous requests. */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Two-step: set parameters first, then enable scanning */
	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arm the automatic scan stop */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
1906
/* Abort a running LE scan before its timeout fires. Returns -EALREADY
 * when no scan is active. The disable command is only sent if the
 * pending le_scan_disable work could be cancelled (otherwise the work
 * itself will send it). */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1924
7ba8b4be
AG
1925static void le_scan_disable_work(struct work_struct *work)
1926{
1927 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1928 le_scan_disable.work);
7ba8b4be
AG
1929 struct hci_cp_le_set_scan_enable cp;
1930
1931 BT_DBG("%s", hdev->name);
1932
1933 memset(&cp, 0, sizeof(cp));
1934
1935 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1936}
1937
28b75a89
AG
1938static void le_scan_work(struct work_struct *work)
1939{
1940 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1941 struct le_scan_params *param = &hdev->le_scan_params;
1942
1943 BT_DBG("%s", hdev->name);
1944
04124681
GP
1945 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1946 param->timeout);
28b75a89
AG
1947}
1948
/* Kick off an asynchronous LE scan: store the parameters on the device
 * and queue le_scan_work. Returns 0, -ENOTSUPP in peripheral role, or
 * -EINPROGRESS when the scan work is already pending/running. */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the scan work blocks on synchronous requests */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1971
/* Alloc HCI device */
/* Allocate and initialise a struct hci_dev with default packet types,
 * locks, lists, work items, queues and the command timer. The device is
 * not yet registered; returns NULL on allocation failure. */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Conservative defaults until the controller reports its own */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2027
/* Free HCI device */
/* Drop the driver-init queue and release the last device reference;
 * the struct itself is freed by the device release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2037
/* Register HCI device */
/* Allocate an index, name the device, create its workqueues, sysfs
 * entries and rfkill switch, and queue the initial power-on.
 * Returns the new index (>= 0) or a negative errno; on failure all
 * partially created resources are unwound. */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queues: HCI processing is
	 * strictly ordered */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is non-fatal: the adapter works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2126
/* Unregister HCI device */
/* Tear down a registered adapter: close it, cancel pending work, notify
 * mgmt, remove rfkill and sysfs, destroy the workqueues, clear all
 * stored key/blacklist/OOB state and release the index and reference. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by the final put below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2185
/* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered notifier users; no local
 * state is changed. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2193
/* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered notifier users; no local
 * state is changed. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2201
/* Receive frame from HCI drivers */
/* Entry point for transport drivers handing a complete HCI packet to
 * the core. The skb is consumed in all cases: either queued on rx_q
 * for hci_rx_work, or freed with -ENXIO when the device is neither up
 * nor initialising. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2224
33e882a5 2225static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2226 int count, __u8 index)
33e882a5
SS
2227{
2228 int len = 0;
2229 int hlen = 0;
2230 int remain = count;
2231 struct sk_buff *skb;
2232 struct bt_skb_cb *scb;
2233
2234 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2235 index >= NUM_REASSEMBLY)
33e882a5
SS
2236 return -EILSEQ;
2237
2238 skb = hdev->reassembly[index];
2239
2240 if (!skb) {
2241 switch (type) {
2242 case HCI_ACLDATA_PKT:
2243 len = HCI_MAX_FRAME_SIZE;
2244 hlen = HCI_ACL_HDR_SIZE;
2245 break;
2246 case HCI_EVENT_PKT:
2247 len = HCI_MAX_EVENT_SIZE;
2248 hlen = HCI_EVENT_HDR_SIZE;
2249 break;
2250 case HCI_SCODATA_PKT:
2251 len = HCI_MAX_SCO_SIZE;
2252 hlen = HCI_SCO_HDR_SIZE;
2253 break;
2254 }
2255
1e429f38 2256 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2257 if (!skb)
2258 return -ENOMEM;
2259
2260 scb = (void *) skb->cb;
2261 scb->expect = hlen;
2262 scb->pkt_type = type;
2263
2264 skb->dev = (void *) hdev;
2265 hdev->reassembly[index] = skb;
2266 }
2267
2268 while (count) {
2269 scb = (void *) skb->cb;
89bb46d0 2270 len = min_t(uint, scb->expect, count);
33e882a5
SS
2271
2272 memcpy(skb_put(skb, len), data, len);
2273
2274 count -= len;
2275 data += len;
2276 scb->expect -= len;
2277 remain = count;
2278
2279 switch (type) {
2280 case HCI_EVENT_PKT:
2281 if (skb->len == HCI_EVENT_HDR_SIZE) {
2282 struct hci_event_hdr *h = hci_event_hdr(skb);
2283 scb->expect = h->plen;
2284
2285 if (skb_tailroom(skb) < scb->expect) {
2286 kfree_skb(skb);
2287 hdev->reassembly[index] = NULL;
2288 return -ENOMEM;
2289 }
2290 }
2291 break;
2292
2293 case HCI_ACLDATA_PKT:
2294 if (skb->len == HCI_ACL_HDR_SIZE) {
2295 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2296 scb->expect = __le16_to_cpu(h->dlen);
2297
2298 if (skb_tailroom(skb) < scb->expect) {
2299 kfree_skb(skb);
2300 hdev->reassembly[index] = NULL;
2301 return -ENOMEM;
2302 }
2303 }
2304 break;
2305
2306 case HCI_SCODATA_PKT:
2307 if (skb->len == HCI_SCO_HDR_SIZE) {
2308 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2309 scb->expect = h->dlen;
2310
2311 if (skb_tailroom(skb) < scb->expect) {
2312 kfree_skb(skb);
2313 hdev->reassembly[index] = NULL;
2314 return -ENOMEM;
2315 }
2316 }
2317 break;
2318 }
2319
2320 if (scb->expect == 0) {
2321 /* Complete frame */
2322
2323 bt_cb(skb)->pkt_type = type;
2324 hci_recv_frame(skb);
2325
2326 hdev->reassembly[index] = NULL;
2327 return remain;
2328 }
2329 }
2330
2331 return remain;
2332}
2333
ef222013
MH
2334int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2335{
f39a3c06
SS
2336 int rem = 0;
2337
ef222013
MH
2338 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2339 return -EILSEQ;
2340
da5f6c37 2341 while (count) {
1e429f38 2342 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2343 if (rem < 0)
2344 return rem;
ef222013 2345
f39a3c06
SS
2346 data += (count - rem);
2347 count = rem;
f81c6224 2348 }
ef222013 2349
f39a3c06 2350 return rem;
ef222013
MH
2351}
2352EXPORT_SYMBOL(hci_recv_fragment);
2353
99811510
SS
/* Dedicated reassembly slot for byte-stream transports (e.g. UART),
 * where the packet type indicator is in-band as the first byte.
 */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream: the first byte of each
 * frame selects the packet type, the rest is fed to hci_reassembly().
 * Returns remaining byte count (>= 0) or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: consume the type byte */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2388
1da177e4
LT
2389/* ---- Interface to upper protocols ---- */
2390
1da177e4
LT
/* Register an upper-protocol callback structure on the global HCI
 * callback list.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2402
/* Remove a previously registered callback structure from the global HCI
 * callback list.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2414
/* Hand one outgoing frame to the transport driver.  Consumes @skb.
 * Copies are delivered to the monitor channel and, in promiscuous mode,
 * to raw HCI sockets before the driver sees the frame.  Returns the
 * driver's send() result, or -ENODEV if the skb has no device attached.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2442
3119ae95
JH
2443void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2444{
2445 skb_queue_head_init(&req->cmd_q);
2446 req->hdev = hdev;
5d73e034 2447 req->err = 0;
3119ae95
JH
2448}
2449
/* Submit a built request: splice its queued commands onto the device
 * command queue and schedule the command work.  @complete is attached to
 * the last command and called when the whole request finishes.  Returns
 * 0 on success, the build error if one occurred, or -ENODATA when the
 * request is empty.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Atomically append the whole request to the device queue so its
	 * commands stay contiguous.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2481
1ca3a9d0
JH
/* Allocate and fill an skb holding one HCI command (header + @plen
 * parameter bytes copied from @param).  Returns the skb, or NULL on
 * allocation failure.  Caller owns the returned skb.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Opcode is little-endian on the wire */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2507
/* Send HCI command.
 *
 * Builds a stand-alone command, queues it on the device command queue
 * and kicks the command work.  Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2531
71c76a17 2532/* Queue a command to an asynchronous HCI request */
e348fe6b 2533void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
71c76a17
JH
2534{
2535 struct hci_dev *hdev = req->hdev;
2536 struct sk_buff *skb;
2537
2538 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2539
34739c1e
AG
2540 /* If an error occured during request building, there is no point in
2541 * queueing the HCI command. We can simply return.
2542 */
2543 if (req->err)
2544 return;
2545
71c76a17
JH
2546 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2547 if (!skb) {
5d73e034
AG
2548 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2549 hdev->name, opcode);
2550 req->err = -ENOMEM;
e348fe6b 2551 return;
71c76a17
JH
2552 }
2553
2554 if (skb_queue_empty(&req->cmd_q))
2555 bt_cb(skb)->req.start = true;
2556
2557 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
2558}
2559
1da177e4 2560/* Get data from the previously sent command */
a9de9248 2561void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2562{
2563 struct hci_command_hdr *hdr;
2564
2565 if (!hdev->sent_cmd)
2566 return NULL;
2567
2568 hdr = (void *) hdev->sent_cmd->data;
2569
a9de9248 2570 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2571 return NULL;
2572
f0e09510 2573 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2574
2575 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2576}
2577
2578/* Send ACL data */
2579static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2580{
2581 struct hci_acl_hdr *hdr;
2582 int len = skb->len;
2583
badff6d0
ACM
2584 skb_push(skb, HCI_ACL_HDR_SIZE);
2585 skb_reset_transport_header(skb);
9c70220b 2586 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2587 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2588 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2589}
2590
ee22be7e 2591static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2592 struct sk_buff *skb, __u16 flags)
1da177e4 2593{
ee22be7e 2594 struct hci_conn *conn = chan->conn;
1da177e4
LT
2595 struct hci_dev *hdev = conn->hdev;
2596 struct sk_buff *list;
2597
087bfd99
GP
2598 skb->len = skb_headlen(skb);
2599 skb->data_len = 0;
2600
2601 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2602
2603 switch (hdev->dev_type) {
2604 case HCI_BREDR:
2605 hci_add_acl_hdr(skb, conn->handle, flags);
2606 break;
2607 case HCI_AMP:
2608 hci_add_acl_hdr(skb, chan->handle, flags);
2609 break;
2610 default:
2611 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2612 return;
2613 }
087bfd99 2614
70f23020
AE
2615 list = skb_shinfo(skb)->frag_list;
2616 if (!list) {
1da177e4
LT
2617 /* Non fragmented */
2618 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2619
73d80deb 2620 skb_queue_tail(queue, skb);
1da177e4
LT
2621 } else {
2622 /* Fragmented */
2623 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2624
2625 skb_shinfo(skb)->frag_list = NULL;
2626
2627 /* Queue all fragments atomically */
af3e6359 2628 spin_lock(&queue->lock);
1da177e4 2629
73d80deb 2630 __skb_queue_tail(queue, skb);
e702112f
AE
2631
2632 flags &= ~ACL_START;
2633 flags |= ACL_CONT;
1da177e4
LT
2634 do {
2635 skb = list; list = list->next;
8e87d142 2636
1da177e4 2637 skb->dev = (void *) hdev;
0d48d939 2638 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2639 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2640
2641 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2642
73d80deb 2643 __skb_queue_tail(queue, skb);
1da177e4
LT
2644 } while (list);
2645
af3e6359 2646 spin_unlock(&queue->lock);
1da177e4 2647 }
73d80deb
LAD
2648}
2649
/* Queue outgoing ACL data on the channel's data queue and schedule the
 * TX work item to actually push it to the controller.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2662
/* Send SCO data.
 *
 * Prepends the SCO header (connection handle + length), queues the skb
 * on the connection's data queue and schedules TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Capture the payload length before pushing the header */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2684
2685/* ---- HCI TX task (outgoing data) ---- */
2686
/* HCI Connection scheduler */

/* Pick the connection of @type with the fewest in-flight packets that
 * has queued data, and compute a fair per-round TX quota for it in
 * *@quote.  Returns NULL (and *quote = 0) when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins, for fairness */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Divide the controller's available buffer credits among
		 * the active connections of this link type.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2747
6039aa73 2748static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2749{
2750 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2751 struct hci_conn *c;
1da177e4 2752
bae1f5d9 2753 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2754
bf4c6325
GP
2755 rcu_read_lock();
2756
1da177e4 2757 /* Kill stalled connections */
bf4c6325 2758 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2759 if (c->type == type && c->sent) {
6ed93dc6
AE
2760 BT_ERR("%s killing stalled connection %pMR",
2761 hdev->name, &c->dst);
bed71748 2762 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2763 }
2764 }
bf4c6325
GP
2765
2766 rcu_read_unlock();
1da177e4
LT
2767}
2768
6039aa73
GP
/* Channel-level TX scheduler: pick the best channel of link @type to
 * service next and compute its quota in *@quote.  Selection prefers the
 * highest queued skb priority, then the connection with the fewest
 * in-flight packets.  Returns NULL when no channel has data ready.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the highest priority seen so far competes */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New top priority - restart the contest */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on the parent connection's sent count */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Share the controller's buffer credits among the contenders */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2850
02b20f0b
LAD
/* Anti-starvation pass after a TX round: channels that were serviced get
 * their sent counter reset; channels that were skipped get the priority
 * of their head skb promoted so they win the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Serviced this round - just reset the counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel - bump its head skb's priority */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2900
b71d385a
AE
2901static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2902{
2903 /* Calculate count of blocks used by this packet */
2904 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2905}
2906
6039aa73 2907static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2908{
1da177e4
LT
2909 if (!test_bit(HCI_RAW, &hdev->flags)) {
2910 /* ACL tx timeout must be longer than maximum
2911 * link supervision timeout (40.9 seconds) */
63d2bc1b 2912 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2913 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2914 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2915 }
63d2bc1b 2916}
1da177e4 2917
6039aa73 2918static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2919{
2920 unsigned int cnt = hdev->acl_cnt;
2921 struct hci_chan *chan;
2922 struct sk_buff *skb;
2923 int quote;
2924
2925 __check_timeout(hdev, cnt);
04837f64 2926
73d80deb 2927 while (hdev->acl_cnt &&
a8c5fb1a 2928 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2929 u32 priority = (skb_peek(&chan->data_q))->priority;
2930 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2931 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2932 skb->len, skb->priority);
73d80deb 2933
ec1cce24
LAD
2934 /* Stop if priority has changed */
2935 if (skb->priority < priority)
2936 break;
2937
2938 skb = skb_dequeue(&chan->data_q);
2939
73d80deb 2940 hci_conn_enter_active_mode(chan->conn,
04124681 2941 bt_cb(skb)->force_active);
04837f64 2942
1da177e4
LT
2943 hci_send_frame(skb);
2944 hdev->acl_last_tx = jiffies;
2945
2946 hdev->acl_cnt--;
73d80deb
LAD
2947 chan->sent++;
2948 chan->conn->sent++;
1da177e4
LT
2949 }
2950 }
02b20f0b
LAD
2951
2952 if (cnt != hdev->acl_cnt)
2953 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2954}
2955
6039aa73 2956static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2957{
63d2bc1b 2958 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2959 struct hci_chan *chan;
2960 struct sk_buff *skb;
2961 int quote;
bd1eb66b 2962 u8 type;
b71d385a 2963
63d2bc1b 2964 __check_timeout(hdev, cnt);
b71d385a 2965
bd1eb66b
AE
2966 BT_DBG("%s", hdev->name);
2967
2968 if (hdev->dev_type == HCI_AMP)
2969 type = AMP_LINK;
2970 else
2971 type = ACL_LINK;
2972
b71d385a 2973 while (hdev->block_cnt > 0 &&
bd1eb66b 2974 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
2975 u32 priority = (skb_peek(&chan->data_q))->priority;
2976 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2977 int blocks;
2978
2979 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2980 skb->len, skb->priority);
b71d385a
AE
2981
2982 /* Stop if priority has changed */
2983 if (skb->priority < priority)
2984 break;
2985
2986 skb = skb_dequeue(&chan->data_q);
2987
2988 blocks = __get_blocks(hdev, skb);
2989 if (blocks > hdev->block_cnt)
2990 return;
2991
2992 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2993 bt_cb(skb)->force_active);
b71d385a
AE
2994
2995 hci_send_frame(skb);
2996 hdev->acl_last_tx = jiffies;
2997
2998 hdev->block_cnt -= blocks;
2999 quote -= blocks;
3000
3001 chan->sent += blocks;
3002 chan->conn->sent += blocks;
3003 }
3004 }
3005
3006 if (cnt != hdev->block_cnt)
bd1eb66b 3007 hci_prio_recalculate(hdev, type);
b71d385a
AE
3008}
3009
6039aa73 3010static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3011{
3012 BT_DBG("%s", hdev->name);
3013
bd1eb66b
AE
3014 /* No ACL link over BR/EDR controller */
3015 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3016 return;
3017
3018 /* No AMP link over AMP controller */
3019 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3020 return;
3021
3022 switch (hdev->flow_ctl_mode) {
3023 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3024 hci_sched_acl_pkt(hdev);
3025 break;
3026
3027 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3028 hci_sched_acl_blk(hdev);
3029 break;
3030 }
3031}
3032
1da177e4 3033/* Schedule SCO */
6039aa73 3034static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3035{
3036 struct hci_conn *conn;
3037 struct sk_buff *skb;
3038 int quote;
3039
3040 BT_DBG("%s", hdev->name);
3041
52087a79
LAD
3042 if (!hci_conn_num(hdev, SCO_LINK))
3043 return;
3044
1da177e4
LT
3045 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3046 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3047 BT_DBG("skb %p len %d", skb, skb->len);
3048 hci_send_frame(skb);
3049
3050 conn->sent++;
3051 if (conn->sent == ~0)
3052 conn->sent = 0;
3053 }
3054 }
3055}
3056
6039aa73 3057static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3058{
3059 struct hci_conn *conn;
3060 struct sk_buff *skb;
3061 int quote;
3062
3063 BT_DBG("%s", hdev->name);
3064
52087a79
LAD
3065 if (!hci_conn_num(hdev, ESCO_LINK))
3066 return;
3067
8fc9ced3
GP
3068 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3069 &quote))) {
b6a0dc82
MH
3070 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3071 BT_DBG("skb %p len %d", skb, skb->len);
3072 hci_send_frame(skb);
3073
3074 conn->sent++;
3075 if (conn->sent == ~0)
3076 conn->sent = 0;
3077 }
3078 }
3079}
3080
6039aa73 3081static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3082{
73d80deb 3083 struct hci_chan *chan;
6ed58ec5 3084 struct sk_buff *skb;
02b20f0b 3085 int quote, cnt, tmp;
6ed58ec5
VT
3086
3087 BT_DBG("%s", hdev->name);
3088
52087a79
LAD
3089 if (!hci_conn_num(hdev, LE_LINK))
3090 return;
3091
6ed58ec5
VT
3092 if (!test_bit(HCI_RAW, &hdev->flags)) {
3093 /* LE tx timeout must be longer than maximum
3094 * link supervision timeout (40.9 seconds) */
bae1f5d9 3095 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3096 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3097 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3098 }
3099
3100 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3101 tmp = cnt;
73d80deb 3102 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3103 u32 priority = (skb_peek(&chan->data_q))->priority;
3104 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3105 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3106 skb->len, skb->priority);
6ed58ec5 3107
ec1cce24
LAD
3108 /* Stop if priority has changed */
3109 if (skb->priority < priority)
3110 break;
3111
3112 skb = skb_dequeue(&chan->data_q);
3113
6ed58ec5
VT
3114 hci_send_frame(skb);
3115 hdev->le_last_tx = jiffies;
3116
3117 cnt--;
73d80deb
LAD
3118 chan->sent++;
3119 chan->conn->sent++;
6ed58ec5
VT
3120 }
3121 }
73d80deb 3122
6ed58ec5
VT
3123 if (hdev->le_pkts)
3124 hdev->le_cnt = cnt;
3125 else
3126 hdev->acl_cnt = cnt;
02b20f0b
LAD
3127
3128 if (cnt != tmp)
3129 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3130}
3131
3eff45ea 3132static void hci_tx_work(struct work_struct *work)
1da177e4 3133{
3eff45ea 3134 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3135 struct sk_buff *skb;
3136
6ed58ec5 3137 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3138 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
3139
3140 /* Schedule queues and send stuff to HCI driver */
3141
3142 hci_sched_acl(hdev);
3143
3144 hci_sched_sco(hdev);
3145
b6a0dc82
MH
3146 hci_sched_esco(hdev);
3147
6ed58ec5
VT
3148 hci_sched_le(hdev);
3149
1da177e4
LT
3150 /* Send next queued raw (unknown type) packet */
3151 while ((skb = skb_dequeue(&hdev->raw_q)))
3152 hci_send_frame(skb);
1da177e4
LT
3153}
3154
25985edc 3155/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3156
/* ACL data packet */

/* Deliver an incoming ACL data packet to L2CAP after stripping the ACL
 * header and resolving the connection from the handle.  Frees the skb
 * if the handle is unknown.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs a 12-bit handle plus PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3192
/* SCO data packet */

/* Deliver an incoming SCO data packet to the SCO layer after stripping
 * the SCO header.  Frees the skb if the handle is unknown.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3223
9238f36a
JH
3224static bool hci_req_is_complete(struct hci_dev *hdev)
3225{
3226 struct sk_buff *skb;
3227
3228 skb = skb_peek(&hdev->cmd_q);
3229 if (!skb)
3230 return true;
3231
3232 return bt_cb(skb)->req.start;
3233}
3234
42c6b129
JH
/* Re-queue a clone of the last sent command at the head of the command
 * queue.  Used to recover from controllers that spontaneously reset and
 * lose the in-flight command; HCI_OP_RESET itself is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Head of the queue so it goes out before anything newer */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3256
9238f36a
JH
/* Handle completion of the command identified by @opcode/@status: on
 * request failure, drop the request's remaining queued commands; when
 * the request finishes, invoke its completion callback.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A start marker means the next request begins here */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3314
/* Handle a Command Status event for @opcode: failures are treated as
 * request completion; success only completes the request when it was
 * the request's last command.
 */
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
3340
b78752cc 3341static void hci_rx_work(struct work_struct *work)
1da177e4 3342{
b78752cc 3343 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3344 struct sk_buff *skb;
3345
3346 BT_DBG("%s", hdev->name);
3347
1da177e4 3348 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3349 /* Send copy to monitor */
3350 hci_send_to_monitor(hdev, skb);
3351
1da177e4
LT
3352 if (atomic_read(&hdev->promisc)) {
3353 /* Send copy to the sockets */
470fe1b5 3354 hci_send_to_sock(hdev, skb);
1da177e4
LT
3355 }
3356
3357 if (test_bit(HCI_RAW, &hdev->flags)) {
3358 kfree_skb(skb);
3359 continue;
3360 }
3361
3362 if (test_bit(HCI_INIT, &hdev->flags)) {
3363 /* Don't process data packets in this states. */
0d48d939 3364 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3365 case HCI_ACLDATA_PKT:
3366 case HCI_SCODATA_PKT:
3367 kfree_skb(skb);
3368 continue;
3ff50b79 3369 }
1da177e4
LT
3370 }
3371
3372 /* Process frame */
0d48d939 3373 switch (bt_cb(skb)->pkt_type) {
1da177e4 3374 case HCI_EVENT_PKT:
b78752cc 3375 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3376 hci_event_packet(hdev, skb);
3377 break;
3378
3379 case HCI_ACLDATA_PKT:
3380 BT_DBG("%s ACL data packet", hdev->name);
3381 hci_acldata_packet(hdev, skb);
3382 break;
3383
3384 case HCI_SCODATA_PKT:
3385 BT_DBG("%s SCO data packet", hdev->name);
3386 hci_scodata_packet(hdev, skb);
3387 break;
3388
3389 default:
3390 kfree_skb(skb);
3391 break;
3392 }
3393 }
1da177e4
LT
3394}
3395
/* Deferred work that sends the next queued HCI command to the driver.
 *
 * cmd_cnt is the controller's command-credit count: a command is only
 * transmitted when at least one credit is available. A clone of the
 * outgoing frame is kept in hdev->sent_cmd so the completion path can
 * match the response; the previous sent_cmd is released first. If the
 * clone allocation fails, the command is requeued at the head and the
 * work is rescheduled instead of being lost.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While a reset is pending no response is expected,
			 * so stop the command timeout instead of re-arming it.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
3427
3428int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3429{
3430 /* General inquiry access code (GIAC) */
3431 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3432 struct hci_cp_inquiry cp;
3433
3434 BT_DBG("%s", hdev->name);
3435
3436 if (test_bit(HCI_INQUIRY, &hdev->flags))
3437 return -EINPROGRESS;
3438
4663262c
JH
3439 inquiry_cache_flush(hdev);
3440
2519a1fc
AG
3441 memset(&cp, 0, sizeof(cp));
3442 memcpy(&cp.lap, lap, sizeof(cp.lap));
3443 cp.length = length;
3444
3445 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3446}
023d5049
AG
3447
3448int hci_cancel_inquiry(struct hci_dev *hdev)
3449{
3450 BT_DBG("%s", hdev->name);
3451
3452 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3453 return -EALREADY;
023d5049
AG
3454
3455 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3456}
31f7956c
AG
3457
3458u8 bdaddr_to_le(u8 bdaddr_type)
3459{
3460 switch (bdaddr_type) {
3461 case BDADDR_LE_PUBLIC:
3462 return ADDR_LE_DEV_PUBLIC;
3463
3464 default:
3465 /* Fallback to LE Random address type */
3466 return ADDR_LE_DEV_RANDOM;
3467 }
3468}