Bluetooth: Remove empty HCI event handlers
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device-level event (e.g. HCI_DEV_UP / HCI_DEV_DOWN) to the
 * HCI socket layer so that user-space listeners are notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
/* Execute request and wait for completion.
 *
 * Builds a request via @func, runs it, and sleeps (interruptibly, up to
 * @timeout jiffies) until hci_req_sync_complete() or hci_req_cancel()
 * flips req_status. Caller must hold the request lock (hci_req_lock).
 *
 * Returns 0 on success or when the request queued no commands, a
 * negative errno translated from the HCI status otherwise.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue BEFORE running the request so a fast
	 * completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;
		remove_wait_queue(&hdev->req_wait_q, &wait);
		/* req_run will fail if the request did not add any
		 * commands to the queue, something that can happen when
		 * a request with conditionals doesn't trigger any
		 * commands to be sent. This is normal behavior and
		 * should not trigger an error return.
		 */
		return 0;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied: map the HCI status to an errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* hci_req_cancel() stored a positive errno in req_result. */
		err = -hdev->req_result;
		break;

	default:
		/* Woke up with status still pending: the timeout expired. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
143
01178cd4 144static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
145 void (*req)(struct hci_request *req,
146 unsigned long opt),
01178cd4 147 unsigned long opt, __u32 timeout)
1da177e4
LT
148{
149 int ret;
150
7c6a329e
MH
151 if (!test_bit(HCI_UP, &hdev->flags))
152 return -ENETDOWN;
153
1da177e4
LT
154 /* Serialize all requests */
155 hci_req_lock(hdev);
01178cd4 156 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
157 hci_req_unlock(hdev);
158
159 return ret;
160}
161
42c6b129 162static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 163{
42c6b129 164 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
165
166 /* Reset device */
42c6b129
JH
167 set_bit(HCI_RESET, &req->hdev->flags);
168 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
169}
170
/* Stage-1 init for BR/EDR controllers: packet-based flow control plus
 * the basic identity reads. Commands are queued in this exact order.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
184
/* Stage-1 init for AMP controllers: block-based flow control and the
 * AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
198
/* First stage of controller initialization: flush any driver-supplied
 * "special" setup commands, optionally reset, then run the transport
 * specific stage-1 init (bredr_init/amp_init).
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* The driver's commands are run as their own request so they
	 * complete before the regular init sequence below.
	 */
	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		/* Only the first command of a request carries the start
		 * marker used for completion tracking.
		 */
		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
243
/* Stage-2 setup common to all BR/EDR capable controllers: read basic
 * parameters, clear filters, set the accept timeout and purge stored
 * link keys.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all link keys stored in the controller. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
274
/* Stage-2 setup for LE capable controllers: query LE buffer sizes,
 * features, TX power, white list size and supported states.
 */
static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}
292
293static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
294{
295 if (lmp_ext_inq_capable(hdev))
296 return 0x02;
297
298 if (lmp_inq_rssi_capable(hdev))
299 return 0x01;
300
301 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
302 hdev->lmp_subver == 0x0757)
303 return 0x01;
304
305 if (hdev->manufacturer == 15) {
306 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
307 return 0x01;
308 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
309 return 0x01;
310 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
311 return 0x01;
312 }
313
314 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
315 hdev->lmp_subver == 0x1805)
316 return 0x01;
317
318 return 0x00;
319}
320
42c6b129 321static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
322{
323 u8 mode;
324
42c6b129 325 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 326
42c6b129 327 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
328}
329
/* Build and send the event mask matching the controller's capabilities,
 * followed by the LE event mask when LE is supported.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* events[] is reused for the separate LE event mask. */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
398
/* Second stage of controller initialization: transport setup, event
 * masks and feature-dependent configuration (SSP, EIR, inquiry mode,
 * extended features, link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* Read Local Supported Commands is not mandatory before 1.2 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear any stale EIR data, both
			 * locally and in the controller.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
449
42c6b129 450static void hci_setup_link_policy(struct hci_request *req)
2177bab5 451{
42c6b129 452 struct hci_dev *hdev = req->hdev;
2177bab5
JH
453 struct hci_cp_write_def_link_policy cp;
454 u16 link_policy = 0;
455
456 if (lmp_rswitch_capable(hdev))
457 link_policy |= HCI_LP_RSWITCH;
458 if (lmp_hold_capable(hdev))
459 link_policy |= HCI_LP_HOLD;
460 if (lmp_sniff_capable(hdev))
461 link_policy |= HCI_LP_SNIFF;
462 if (lmp_park_capable(hdev))
463 link_policy |= HCI_LP_PARK;
464
465 cp.policy = cpu_to_le16(link_policy);
42c6b129 466 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
467}
468
42c6b129 469static void hci_set_le_support(struct hci_request *req)
2177bab5 470{
42c6b129 471 struct hci_dev *hdev = req->hdev;
2177bab5
JH
472 struct hci_cp_write_le_host_supported cp;
473
474 memset(&cp, 0, sizeof(cp));
475
476 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
477 cp.le = 0x01;
478 cp.simul = lmp_le_br_capable(hdev);
479 }
480
481 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
482 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
483 &cp);
2177bab5
JH
484}
485
/* Third stage of controller initialization: settings that depend on the
 * supported-commands bitmap and LE capability discovered in stage 2.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* commands[5] bit 4: Write Default Link Policy Settings */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);
}
496
/* Run the staged controller initialization. Returns 0 on success or the
 * first stage's negative errno on failure.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
518
42c6b129 519static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
520{
521 __u8 scan = opt;
522
42c6b129 523 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
524
525 /* Inquiry and Page scans */
42c6b129 526 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
527}
528
42c6b129 529static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
530{
531 __u8 auth = opt;
532
42c6b129 533 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
534
535 /* Authentication */
42c6b129 536 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
537}
538
42c6b129 539static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
540{
541 __u8 encrypt = opt;
542
42c6b129 543 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 544
e4e8e37c 545 /* Encryption */
42c6b129 546 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
547}
548
42c6b129 549static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
550{
551 __le16 policy = cpu_to_le16(opt);
552
42c6b129 553 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
554
555 /* Default link policy */
42c6b129 556 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
557}
558
8e87d142 559/* Get HCI device by index.
1da177e4
LT
560 * Device is held on return. */
561struct hci_dev *hci_dev_get(int index)
562{
8035ded4 563 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
564
565 BT_DBG("%d", index);
566
567 if (index < 0)
568 return NULL;
569
570 read_lock(&hci_dev_list_lock);
8035ded4 571 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
572 if (d->id == index) {
573 hdev = hci_dev_hold(d);
574 break;
575 }
576 }
577 read_unlock(&hci_dev_list_lock);
578 return hdev;
579}
1da177e4
LT
580
581/* ---- Inquiry support ---- */
ff9ef578 582
30dc78e1
JH
583bool hci_discovery_active(struct hci_dev *hdev)
584{
585 struct discovery_state *discov = &hdev->discovery;
586
6fbe195d 587 switch (discov->state) {
343f935b 588 case DISCOVERY_FINDING:
6fbe195d 589 case DISCOVERY_RESOLVING:
30dc78e1
JH
590 return true;
591
6fbe195d
AG
592 default:
593 return false;
594 }
30dc78e1
JH
595}
596
/* Transition the discovery state machine, emitting mgmt "discovering"
 * events on the edges user space cares about. No-op if the state is
 * unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so no "stopped discovering" event is sent.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
622
1da177e4
LT
623static void inquiry_cache_flush(struct hci_dev *hdev)
624{
30883512 625 struct discovery_state *cache = &hdev->discovery;
b57c1a56 626 struct inquiry_entry *p, *n;
1da177e4 627
561aafbc
JH
628 list_for_each_entry_safe(p, n, &cache->all, all) {
629 list_del(&p->all);
b57c1a56 630 kfree(p);
1da177e4 631 }
561aafbc
JH
632
633 INIT_LIST_HEAD(&cache->unknown);
634 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
635}
636
a8c5fb1a
GP
637struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
638 bdaddr_t *bdaddr)
1da177e4 639{
30883512 640 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
641 struct inquiry_entry *e;
642
6ed93dc6 643 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 644
561aafbc
JH
645 list_for_each_entry(e, &cache->all, all) {
646 if (!bacmp(&e->data.bdaddr, bdaddr))
647 return e;
648 }
649
650 return NULL;
651}
652
653struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 654 bdaddr_t *bdaddr)
561aafbc 655{
30883512 656 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
657 struct inquiry_entry *e;
658
6ed93dc6 659 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
660
661 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 662 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
663 return e;
664 }
665
666 return NULL;
1da177e4
LT
667}
668
/* Find an entry on the name-resolve list. BDADDR_ANY acts as a wildcard
 * that matches the first entry in name-state @state; otherwise match by
 * exact address. Returns NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
687
/* Re-insert @ie into the resolve list keeping it ordered by RSSI
 * strength (strongest first, by absolute value) so that name resolution
 * happens for the closest devices first. Entries whose resolution is
 * already in flight (NAME_PENDING) stay at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until the first entry with a weaker signal than @ie. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
706
/* Insert or refresh an inquiry-cache entry for @data.
 *
 * @name_known: caller already knows the remote name (e.g. from EIR).
 * @ssp: out-parameter, set true when the device supports SSP.
 *
 * Returns true when a name-resolution step is still worthwhile for this
 * entry (name not in NOT_KNOWN state), false otherwise.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Device is discoverable over the air again, so any stored OOB
	 * pairing data for it is stale.
	 */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed: re-sort the entry in the resolve queue
		 * so the strongest signals are resolved first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: take the entry off whichever pending
	 * list (unknown/resolve) it was on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
764
/* Serialize up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Caller
 * holds the device lock and must size @buf for @num entries.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
792
/* Request builder for hci_inquiry(): queue an HCI_Inquiry command built
 * from the user-supplied hci_inquiry_req (passed via @opt). Skipped when
 * an inquiry is already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
810
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry, then copy
 * the cached results back to user space after the updated request
 * header. Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only start a new inquiry when the cache is stale or empty, or
	 * when the caller explicitly requested a flush.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; 2000ms per unit is a safe upper
	 * bound for the request timeout.
	 */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
878
/* Build LE advertising data into @ptr (flags, TX power, local name) and
 * return the total number of bytes written. @ptr must hold at least
 * HCI_MAX_AD_LENGTH bytes; the name is truncated (and marked
 * EIR_NAME_SHORT) to fit the remaining space.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		/* AD structure: length, type, payload. */
		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Room left for the name payload after its 2-byte
		 * length/type header.
		 */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD length byte covers the type byte plus the name. */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
936
/* Regenerate the LE advertising data and push it to the controller,
 * skipping the command when the data is unchanged. Returns 0 on
 * success, -EINVAL when the controller is not LE capable, or the
 * hci_send_cmd() error.
 */
int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* No change since the last update: nothing to send. */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
971
1da177e4
LT
972/* ---- HCI ioctl helpers ---- */
973
/* Bring up HCI device @dev: open the transport, run the staged init
 * sequence (unless the device is raw), and notify listeners/mgmt. On
 * init failure the transport is fully torn down again. Returns 0 or a
 * negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Refuse to power on while rfkill blocks the radio. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the HCI init sequence entirely. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		/* Don't announce power-on while mgmt setup is still in
		 * progress, and only for mgmt-visible controllers.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1059
/* Tear down a running HCI device: cancel pending work, flush queues,
 * optionally reset the controller, close the transport and notify mgmt.
 * Safe to call on an already-down device (returns 0 early).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* AUTO_OFF means mgmt already considers the device off, so no
	 * powered-down event is sent in that case.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1152
1153int hci_dev_close(__u16 dev)
1154{
1155 struct hci_dev *hdev;
1156 int err;
1157
70f23020
AE
1158 hdev = hci_dev_get(dev);
1159 if (!hdev)
1da177e4 1160 return -ENODEV;
8ee56540
MH
1161
1162 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1163 cancel_delayed_work(&hdev->power_off);
1164
1da177e4 1165 err = hci_dev_do_close(hdev);
8ee56540 1166
1da177e4
LT
1167 hci_dev_put(hdev);
1168 return err;
1169}
1170
1171int hci_dev_reset(__u16 dev)
1172{
1173 struct hci_dev *hdev;
1174 int ret = 0;
1175
70f23020
AE
1176 hdev = hci_dev_get(dev);
1177 if (!hdev)
1da177e4
LT
1178 return -ENODEV;
1179
1180 hci_req_lock(hdev);
1da177e4
LT
1181
1182 if (!test_bit(HCI_UP, &hdev->flags))
1183 goto done;
1184
1185 /* Drop queues */
1186 skb_queue_purge(&hdev->rx_q);
1187 skb_queue_purge(&hdev->cmd_q);
1188
09fd0de5 1189 hci_dev_lock(hdev);
1da177e4
LT
1190 inquiry_cache_flush(hdev);
1191 hci_conn_hash_flush(hdev);
09fd0de5 1192 hci_dev_unlock(hdev);
1da177e4
LT
1193
1194 if (hdev->flush)
1195 hdev->flush(hdev);
1196
8e87d142 1197 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1198 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
1199
1200 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 1201 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
1202
1203done:
1da177e4
LT
1204 hci_req_unlock(hdev);
1205 hci_dev_put(hdev);
1206 return ret;
1207}
1208
1209int hci_dev_reset_stat(__u16 dev)
1210{
1211 struct hci_dev *hdev;
1212 int ret = 0;
1213
70f23020
AE
1214 hdev = hci_dev_get(dev);
1215 if (!hdev)
1da177e4
LT
1216 return -ENODEV;
1217
1218 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1219
1220 hci_dev_put(hdev);
1221
1222 return ret;
1223}
1224
1225int hci_dev_cmd(unsigned int cmd, void __user *arg)
1226{
1227 struct hci_dev *hdev;
1228 struct hci_dev_req dr;
1229 int err = 0;
1230
1231 if (copy_from_user(&dr, arg, sizeof(dr)))
1232 return -EFAULT;
1233
70f23020
AE
1234 hdev = hci_dev_get(dr.dev_id);
1235 if (!hdev)
1da177e4
LT
1236 return -ENODEV;
1237
1238 switch (cmd) {
1239 case HCISETAUTH:
01178cd4
JH
1240 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1241 HCI_INIT_TIMEOUT);
1da177e4
LT
1242 break;
1243
1244 case HCISETENCRYPT:
1245 if (!lmp_encrypt_capable(hdev)) {
1246 err = -EOPNOTSUPP;
1247 break;
1248 }
1249
1250 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1251 /* Auth must be enabled first */
01178cd4
JH
1252 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1253 HCI_INIT_TIMEOUT);
1da177e4
LT
1254 if (err)
1255 break;
1256 }
1257
01178cd4
JH
1258 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1259 HCI_INIT_TIMEOUT);
1da177e4
LT
1260 break;
1261
1262 case HCISETSCAN:
01178cd4
JH
1263 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1264 HCI_INIT_TIMEOUT);
1da177e4
LT
1265 break;
1266
1da177e4 1267 case HCISETLINKPOL:
01178cd4
JH
1268 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1269 HCI_INIT_TIMEOUT);
1da177e4
LT
1270 break;
1271
1272 case HCISETLINKMODE:
e4e8e37c
MH
1273 hdev->link_mode = ((__u16) dr.dev_opt) &
1274 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1275 break;
1276
1277 case HCISETPTYPE:
1278 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1279 break;
1280
1281 case HCISETACLMTU:
e4e8e37c
MH
1282 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1283 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1284 break;
1285
1286 case HCISETSCOMTU:
e4e8e37c
MH
1287 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1288 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1289 break;
1290
1291 default:
1292 err = -EINVAL;
1293 break;
1294 }
e4e8e37c 1295
1da177e4
LT
1296 hci_dev_put(hdev);
1297 return err;
1298}
1299
1300int hci_get_dev_list(void __user *arg)
1301{
8035ded4 1302 struct hci_dev *hdev;
1da177e4
LT
1303 struct hci_dev_list_req *dl;
1304 struct hci_dev_req *dr;
1da177e4
LT
1305 int n = 0, size, err;
1306 __u16 dev_num;
1307
1308 if (get_user(dev_num, (__u16 __user *) arg))
1309 return -EFAULT;
1310
1311 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1312 return -EINVAL;
1313
1314 size = sizeof(*dl) + dev_num * sizeof(*dr);
1315
70f23020
AE
1316 dl = kzalloc(size, GFP_KERNEL);
1317 if (!dl)
1da177e4
LT
1318 return -ENOMEM;
1319
1320 dr = dl->dev_req;
1321
f20d09d5 1322 read_lock(&hci_dev_list_lock);
8035ded4 1323 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1324 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1325 cancel_delayed_work(&hdev->power_off);
c542a06c 1326
a8b2d5c2
JH
1327 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1328 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1329
1da177e4
LT
1330 (dr + n)->dev_id = hdev->id;
1331 (dr + n)->dev_opt = hdev->flags;
c542a06c 1332
1da177e4
LT
1333 if (++n >= dev_num)
1334 break;
1335 }
f20d09d5 1336 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1337
1338 dl->dev_num = n;
1339 size = sizeof(*dl) + n * sizeof(*dr);
1340
1341 err = copy_to_user(arg, dl, size);
1342 kfree(dl);
1343
1344 return err ? -EFAULT : 0;
1345}
1346
1347int hci_get_dev_info(void __user *arg)
1348{
1349 struct hci_dev *hdev;
1350 struct hci_dev_info di;
1351 int err = 0;
1352
1353 if (copy_from_user(&di, arg, sizeof(di)))
1354 return -EFAULT;
1355
70f23020
AE
1356 hdev = hci_dev_get(di.dev_id);
1357 if (!hdev)
1da177e4
LT
1358 return -ENODEV;
1359
a8b2d5c2 1360 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1361 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1362
a8b2d5c2
JH
1363 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1364 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1365
1da177e4
LT
1366 strcpy(di.name, hdev->name);
1367 di.bdaddr = hdev->bdaddr;
943da25d 1368 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1369 di.flags = hdev->flags;
1370 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1371 if (lmp_bredr_capable(hdev)) {
1372 di.acl_mtu = hdev->acl_mtu;
1373 di.acl_pkts = hdev->acl_pkts;
1374 di.sco_mtu = hdev->sco_mtu;
1375 di.sco_pkts = hdev->sco_pkts;
1376 } else {
1377 di.acl_mtu = hdev->le_mtu;
1378 di.acl_pkts = hdev->le_pkts;
1379 di.sco_mtu = 0;
1380 di.sco_pkts = 0;
1381 }
1da177e4
LT
1382 di.link_policy = hdev->link_policy;
1383 di.link_mode = hdev->link_mode;
1384
1385 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1386 memcpy(&di.features, &hdev->features, sizeof(di.features));
1387
1388 if (copy_to_user(arg, &di, sizeof(di)))
1389 err = -EFAULT;
1390
1391 hci_dev_put(hdev);
1392
1393 return err;
1394}
1395
1396/* ---- Interface to HCI drivers ---- */
1397
611b30f7
MH
1398static int hci_rfkill_set_block(void *data, bool blocked)
1399{
1400 struct hci_dev *hdev = data;
1401
1402 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1403
1404 if (!blocked)
1405 return 0;
1406
1407 hci_dev_do_close(hdev);
1408
1409 return 0;
1410}
1411
1412static const struct rfkill_ops hci_rfkill_ops = {
1413 .set_block = hci_rfkill_set_block,
1414};
1415
ab81cbf9
JH
1416static void hci_power_on(struct work_struct *work)
1417{
1418 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1419
1420 BT_DBG("%s", hdev->name);
1421
1422 if (hci_dev_open(hdev->id) < 0)
1423 return;
1424
a8b2d5c2 1425 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
19202573
JH
1426 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1427 HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1428
a8b2d5c2 1429 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1430 mgmt_index_added(hdev);
ab81cbf9
JH
1431}
1432
1433static void hci_power_off(struct work_struct *work)
1434{
3243553f 1435 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1436 power_off.work);
ab81cbf9
JH
1437
1438 BT_DBG("%s", hdev->name);
1439
8ee56540 1440 hci_dev_do_close(hdev);
ab81cbf9
JH
1441}
1442
16ab91ab
JH
1443static void hci_discov_off(struct work_struct *work)
1444{
1445 struct hci_dev *hdev;
1446 u8 scan = SCAN_PAGE;
1447
1448 hdev = container_of(work, struct hci_dev, discov_off.work);
1449
1450 BT_DBG("%s", hdev->name);
1451
09fd0de5 1452 hci_dev_lock(hdev);
16ab91ab
JH
1453
1454 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1455
1456 hdev->discov_timeout = 0;
1457
09fd0de5 1458 hci_dev_unlock(hdev);
16ab91ab
JH
1459}
1460
2aeb9a1a
JH
1461int hci_uuids_clear(struct hci_dev *hdev)
1462{
4821002c 1463 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1464
4821002c
JH
1465 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1466 list_del(&uuid->list);
2aeb9a1a
JH
1467 kfree(uuid);
1468 }
1469
1470 return 0;
1471}
1472
55ed8ca1
JH
1473int hci_link_keys_clear(struct hci_dev *hdev)
1474{
1475 struct list_head *p, *n;
1476
1477 list_for_each_safe(p, n, &hdev->link_keys) {
1478 struct link_key *key;
1479
1480 key = list_entry(p, struct link_key, list);
1481
1482 list_del(p);
1483 kfree(key);
1484 }
1485
1486 return 0;
1487}
1488
b899efaf
VCG
1489int hci_smp_ltks_clear(struct hci_dev *hdev)
1490{
1491 struct smp_ltk *k, *tmp;
1492
1493 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1494 list_del(&k->list);
1495 kfree(k);
1496 }
1497
1498 return 0;
1499}
1500
55ed8ca1
JH
1501struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1502{
8035ded4 1503 struct link_key *k;
55ed8ca1 1504
8035ded4 1505 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1506 if (bacmp(bdaddr, &k->bdaddr) == 0)
1507 return k;
55ed8ca1
JH
1508
1509 return NULL;
1510}
1511
745c0ce3 1512static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1513 u8 key_type, u8 old_key_type)
d25e28ab
JH
1514{
1515 /* Legacy key */
1516 if (key_type < 0x03)
745c0ce3 1517 return true;
d25e28ab
JH
1518
1519 /* Debug keys are insecure so don't store them persistently */
1520 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1521 return false;
d25e28ab
JH
1522
1523 /* Changed combination key and there's no previous one */
1524 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1525 return false;
d25e28ab
JH
1526
1527 /* Security mode 3 case */
1528 if (!conn)
745c0ce3 1529 return true;
d25e28ab
JH
1530
1531 /* Neither local nor remote side had no-bonding as requirement */
1532 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1533 return true;
d25e28ab
JH
1534
1535 /* Local side had dedicated bonding as requirement */
1536 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1537 return true;
d25e28ab
JH
1538
1539 /* Remote side had dedicated bonding as requirement */
1540 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1541 return true;
d25e28ab
JH
1542
1543 /* If none of the above criteria match, then don't store the key
1544 * persistently */
745c0ce3 1545 return false;
d25e28ab
JH
1546}
1547
c9839a11 1548struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1549{
c9839a11 1550 struct smp_ltk *k;
75d262c2 1551
c9839a11
VCG
1552 list_for_each_entry(k, &hdev->long_term_keys, list) {
1553 if (k->ediv != ediv ||
a8c5fb1a 1554 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1555 continue;
1556
c9839a11 1557 return k;
75d262c2
VCG
1558 }
1559
1560 return NULL;
1561}
75d262c2 1562
c9839a11 1563struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1564 u8 addr_type)
75d262c2 1565{
c9839a11 1566 struct smp_ltk *k;
75d262c2 1567
c9839a11
VCG
1568 list_for_each_entry(k, &hdev->long_term_keys, list)
1569 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1570 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1571 return k;
1572
1573 return NULL;
1574}
75d262c2 1575
d25e28ab 1576int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1577 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1578{
1579 struct link_key *key, *old_key;
745c0ce3
VA
1580 u8 old_key_type;
1581 bool persistent;
55ed8ca1
JH
1582
1583 old_key = hci_find_link_key(hdev, bdaddr);
1584 if (old_key) {
1585 old_key_type = old_key->type;
1586 key = old_key;
1587 } else {
12adcf3a 1588 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1589 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1590 if (!key)
1591 return -ENOMEM;
1592 list_add(&key->list, &hdev->link_keys);
1593 }
1594
6ed93dc6 1595 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1596
d25e28ab
JH
1597 /* Some buggy controller combinations generate a changed
1598 * combination key for legacy pairing even when there's no
1599 * previous key */
1600 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1601 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1602 type = HCI_LK_COMBINATION;
655fe6ec
JH
1603 if (conn)
1604 conn->key_type = type;
1605 }
d25e28ab 1606
55ed8ca1 1607 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1608 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1609 key->pin_len = pin_len;
1610
b6020ba0 1611 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1612 key->type = old_key_type;
4748fed2
JH
1613 else
1614 key->type = type;
1615
4df378a1
JH
1616 if (!new_key)
1617 return 0;
1618
1619 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1620
744cf19e 1621 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1622
6ec5bcad
VA
1623 if (conn)
1624 conn->flush_key = !persistent;
55ed8ca1
JH
1625
1626 return 0;
1627}
1628
c9839a11 1629int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1630 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1631 ediv, u8 rand[8])
75d262c2 1632{
c9839a11 1633 struct smp_ltk *key, *old_key;
75d262c2 1634
c9839a11
VCG
1635 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1636 return 0;
75d262c2 1637
c9839a11
VCG
1638 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1639 if (old_key)
75d262c2 1640 key = old_key;
c9839a11
VCG
1641 else {
1642 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1643 if (!key)
1644 return -ENOMEM;
c9839a11 1645 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1646 }
1647
75d262c2 1648 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1649 key->bdaddr_type = addr_type;
1650 memcpy(key->val, tk, sizeof(key->val));
1651 key->authenticated = authenticated;
1652 key->ediv = ediv;
1653 key->enc_size = enc_size;
1654 key->type = type;
1655 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1656
c9839a11
VCG
1657 if (!new_key)
1658 return 0;
75d262c2 1659
261cc5aa
VCG
1660 if (type & HCI_SMP_LTK)
1661 mgmt_new_ltk(hdev, key, 1);
1662
75d262c2
VCG
1663 return 0;
1664}
1665
55ed8ca1
JH
1666int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1667{
1668 struct link_key *key;
1669
1670 key = hci_find_link_key(hdev, bdaddr);
1671 if (!key)
1672 return -ENOENT;
1673
6ed93dc6 1674 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1675
1676 list_del(&key->list);
1677 kfree(key);
1678
1679 return 0;
1680}
1681
b899efaf
VCG
1682int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1683{
1684 struct smp_ltk *k, *tmp;
1685
1686 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1687 if (bacmp(bdaddr, &k->bdaddr))
1688 continue;
1689
6ed93dc6 1690 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1691
1692 list_del(&k->list);
1693 kfree(k);
1694 }
1695
1696 return 0;
1697}
1698
6bd32326 1699/* HCI command timer function */
bda4f23a 1700static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1701{
1702 struct hci_dev *hdev = (void *) arg;
1703
bda4f23a
AE
1704 if (hdev->sent_cmd) {
1705 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1706 u16 opcode = __le16_to_cpu(sent->opcode);
1707
1708 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1709 } else {
1710 BT_ERR("%s command tx timeout", hdev->name);
1711 }
1712
6bd32326 1713 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1714 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1715}
1716
2763eda6 1717struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1718 bdaddr_t *bdaddr)
2763eda6
SJ
1719{
1720 struct oob_data *data;
1721
1722 list_for_each_entry(data, &hdev->remote_oob_data, list)
1723 if (bacmp(bdaddr, &data->bdaddr) == 0)
1724 return data;
1725
1726 return NULL;
1727}
1728
1729int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1730{
1731 struct oob_data *data;
1732
1733 data = hci_find_remote_oob_data(hdev, bdaddr);
1734 if (!data)
1735 return -ENOENT;
1736
6ed93dc6 1737 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1738
1739 list_del(&data->list);
1740 kfree(data);
1741
1742 return 0;
1743}
1744
1745int hci_remote_oob_data_clear(struct hci_dev *hdev)
1746{
1747 struct oob_data *data, *n;
1748
1749 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1750 list_del(&data->list);
1751 kfree(data);
1752 }
1753
1754 return 0;
1755}
1756
1757int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1758 u8 *randomizer)
2763eda6
SJ
1759{
1760 struct oob_data *data;
1761
1762 data = hci_find_remote_oob_data(hdev, bdaddr);
1763
1764 if (!data) {
1765 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1766 if (!data)
1767 return -ENOMEM;
1768
1769 bacpy(&data->bdaddr, bdaddr);
1770 list_add(&data->list, &hdev->remote_oob_data);
1771 }
1772
1773 memcpy(data->hash, hash, sizeof(data->hash));
1774 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1775
6ed93dc6 1776 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1777
1778 return 0;
1779}
1780
04124681 1781struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1782{
8035ded4 1783 struct bdaddr_list *b;
b2a66aad 1784
8035ded4 1785 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1786 if (bacmp(bdaddr, &b->bdaddr) == 0)
1787 return b;
b2a66aad
AJ
1788
1789 return NULL;
1790}
1791
1792int hci_blacklist_clear(struct hci_dev *hdev)
1793{
1794 struct list_head *p, *n;
1795
1796 list_for_each_safe(p, n, &hdev->blacklist) {
1797 struct bdaddr_list *b;
1798
1799 b = list_entry(p, struct bdaddr_list, list);
1800
1801 list_del(p);
1802 kfree(b);
1803 }
1804
1805 return 0;
1806}
1807
88c1fe4b 1808int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1809{
1810 struct bdaddr_list *entry;
b2a66aad
AJ
1811
1812 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1813 return -EBADF;
1814
5e762444
AJ
1815 if (hci_blacklist_lookup(hdev, bdaddr))
1816 return -EEXIST;
b2a66aad
AJ
1817
1818 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1819 if (!entry)
1820 return -ENOMEM;
b2a66aad
AJ
1821
1822 bacpy(&entry->bdaddr, bdaddr);
1823
1824 list_add(&entry->list, &hdev->blacklist);
1825
88c1fe4b 1826 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1827}
1828
88c1fe4b 1829int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1830{
1831 struct bdaddr_list *entry;
b2a66aad 1832
1ec918ce 1833 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1834 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1835
1836 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1837 if (!entry)
5e762444 1838 return -ENOENT;
b2a66aad
AJ
1839
1840 list_del(&entry->list);
1841 kfree(entry);
1842
88c1fe4b 1843 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1844}
1845
42c6b129 1846static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1847{
1848 struct le_scan_params *param = (struct le_scan_params *) opt;
1849 struct hci_cp_le_set_scan_param cp;
1850
1851 memset(&cp, 0, sizeof(cp));
1852 cp.type = param->type;
1853 cp.interval = cpu_to_le16(param->interval);
1854 cp.window = cpu_to_le16(param->window);
1855
42c6b129 1856 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1857}
1858
42c6b129 1859static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1860{
1861 struct hci_cp_le_set_scan_enable cp;
1862
1863 memset(&cp, 0, sizeof(cp));
1864 cp.enable = 1;
0431a43c 1865 cp.filter_dup = 1;
7ba8b4be 1866
42c6b129 1867 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1868}
1869
1870static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 1871 u16 window, int timeout)
7ba8b4be
AG
1872{
1873 long timeo = msecs_to_jiffies(3000);
1874 struct le_scan_params param;
1875 int err;
1876
1877 BT_DBG("%s", hdev->name);
1878
1879 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1880 return -EINPROGRESS;
1881
1882 param.type = type;
1883 param.interval = interval;
1884 param.window = window;
1885
1886 hci_req_lock(hdev);
1887
01178cd4
JH
1888 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1889 timeo);
7ba8b4be 1890 if (!err)
01178cd4 1891 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
7ba8b4be
AG
1892
1893 hci_req_unlock(hdev);
1894
1895 if (err < 0)
1896 return err;
1897
46818ed5
JH
1898 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1899 msecs_to_jiffies(timeout));
7ba8b4be
AG
1900
1901 return 0;
1902}
1903
7dbfac1d
AG
1904int hci_cancel_le_scan(struct hci_dev *hdev)
1905{
1906 BT_DBG("%s", hdev->name);
1907
1908 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1909 return -EALREADY;
1910
1911 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1912 struct hci_cp_le_set_scan_enable cp;
1913
1914 /* Send HCI command to disable LE Scan */
1915 memset(&cp, 0, sizeof(cp));
1916 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1917 }
1918
1919 return 0;
1920}
1921
7ba8b4be
AG
1922static void le_scan_disable_work(struct work_struct *work)
1923{
1924 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1925 le_scan_disable.work);
7ba8b4be
AG
1926 struct hci_cp_le_set_scan_enable cp;
1927
1928 BT_DBG("%s", hdev->name);
1929
1930 memset(&cp, 0, sizeof(cp));
1931
1932 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1933}
1934
28b75a89
AG
1935static void le_scan_work(struct work_struct *work)
1936{
1937 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1938 struct le_scan_params *param = &hdev->le_scan_params;
1939
1940 BT_DBG("%s", hdev->name);
1941
04124681
GP
1942 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1943 param->timeout);
28b75a89
AG
1944}
1945
1946int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 1947 int timeout)
28b75a89
AG
1948{
1949 struct le_scan_params *param = &hdev->le_scan_params;
1950
1951 BT_DBG("%s", hdev->name);
1952
f1550478
JH
1953 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1954 return -ENOTSUPP;
1955
28b75a89
AG
1956 if (work_busy(&hdev->le_scan))
1957 return -EINPROGRESS;
1958
1959 param->type = type;
1960 param->interval = interval;
1961 param->window = window;
1962 param->timeout = timeout;
1963
1964 queue_work(system_long_wq, &hdev->le_scan);
1965
1966 return 0;
1967}
1968
9be0dab7
DH
1969/* Alloc HCI device */
1970struct hci_dev *hci_alloc_dev(void)
1971{
1972 struct hci_dev *hdev;
1973
1974 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1975 if (!hdev)
1976 return NULL;
1977
b1b813d4
DH
1978 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1979 hdev->esco_type = (ESCO_HV1);
1980 hdev->link_mode = (HCI_LM_ACCEPT);
1981 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
1982 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1983 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 1984
b1b813d4
DH
1985 hdev->sniff_max_interval = 800;
1986 hdev->sniff_min_interval = 80;
1987
1988 mutex_init(&hdev->lock);
1989 mutex_init(&hdev->req_lock);
1990
1991 INIT_LIST_HEAD(&hdev->mgmt_pending);
1992 INIT_LIST_HEAD(&hdev->blacklist);
1993 INIT_LIST_HEAD(&hdev->uuids);
1994 INIT_LIST_HEAD(&hdev->link_keys);
1995 INIT_LIST_HEAD(&hdev->long_term_keys);
1996 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 1997 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
1998
1999 INIT_WORK(&hdev->rx_work, hci_rx_work);
2000 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2001 INIT_WORK(&hdev->tx_work, hci_tx_work);
2002 INIT_WORK(&hdev->power_on, hci_power_on);
2003 INIT_WORK(&hdev->le_scan, le_scan_work);
2004
b1b813d4
DH
2005 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2006 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2007 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2008
9be0dab7 2009 skb_queue_head_init(&hdev->driver_init);
b1b813d4
DH
2010 skb_queue_head_init(&hdev->rx_q);
2011 skb_queue_head_init(&hdev->cmd_q);
2012 skb_queue_head_init(&hdev->raw_q);
2013
2014 init_waitqueue_head(&hdev->req_wait_q);
2015
bda4f23a 2016 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 2017
b1b813d4
DH
2018 hci_init_sysfs(hdev);
2019 discovery_init(hdev);
9be0dab7
DH
2020
2021 return hdev;
2022}
2023EXPORT_SYMBOL(hci_alloc_dev);
2024
2025/* Free HCI device */
2026void hci_free_dev(struct hci_dev *hdev)
2027{
2028 skb_queue_purge(&hdev->driver_init);
2029
2030 /* will free via device release */
2031 put_device(&hdev->dev);
2032}
2033EXPORT_SYMBOL(hci_free_dev);
2034
1da177e4
LT
2035/* Register HCI device */
2036int hci_register_dev(struct hci_dev *hdev)
2037{
b1b813d4 2038 int id, error;
1da177e4 2039
010666a1 2040 if (!hdev->open || !hdev->close)
1da177e4
LT
2041 return -EINVAL;
2042
08add513
MM
2043 /* Do not allow HCI_AMP devices to register at index 0,
2044 * so the index can be used as the AMP controller ID.
2045 */
3df92b31
SL
2046 switch (hdev->dev_type) {
2047 case HCI_BREDR:
2048 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2049 break;
2050 case HCI_AMP:
2051 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2052 break;
2053 default:
2054 return -EINVAL;
1da177e4 2055 }
8e87d142 2056
3df92b31
SL
2057 if (id < 0)
2058 return id;
2059
1da177e4
LT
2060 sprintf(hdev->name, "hci%d", id);
2061 hdev->id = id;
2d8b3a11
AE
2062
2063 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2064
3df92b31
SL
2065 write_lock(&hci_dev_list_lock);
2066 list_add(&hdev->list, &hci_dev_list);
f20d09d5 2067 write_unlock(&hci_dev_list_lock);
1da177e4 2068
32845eb1 2069 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
a8c5fb1a 2070 WQ_MEM_RECLAIM, 1);
33ca954d
DH
2071 if (!hdev->workqueue) {
2072 error = -ENOMEM;
2073 goto err;
2074 }
f48fd9c8 2075
6ead1bbc
JH
2076 hdev->req_workqueue = alloc_workqueue(hdev->name,
2077 WQ_HIGHPRI | WQ_UNBOUND |
2078 WQ_MEM_RECLAIM, 1);
2079 if (!hdev->req_workqueue) {
2080 destroy_workqueue(hdev->workqueue);
2081 error = -ENOMEM;
2082 goto err;
2083 }
2084
33ca954d
DH
2085 error = hci_add_sysfs(hdev);
2086 if (error < 0)
2087 goto err_wqueue;
1da177e4 2088
611b30f7 2089 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2090 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2091 hdev);
611b30f7
MH
2092 if (hdev->rfkill) {
2093 if (rfkill_register(hdev->rfkill) < 0) {
2094 rfkill_destroy(hdev->rfkill);
2095 hdev->rfkill = NULL;
2096 }
2097 }
2098
a8b2d5c2 2099 set_bit(HCI_SETUP, &hdev->dev_flags);
ce2be9ac
AE
2100
2101 if (hdev->dev_type != HCI_AMP)
2102 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2103
1da177e4 2104 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2105 hci_dev_hold(hdev);
1da177e4 2106
19202573 2107 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2108
1da177e4 2109 return id;
f48fd9c8 2110
33ca954d
DH
2111err_wqueue:
2112 destroy_workqueue(hdev->workqueue);
6ead1bbc 2113 destroy_workqueue(hdev->req_workqueue);
33ca954d 2114err:
3df92b31 2115 ida_simple_remove(&hci_index_ida, hdev->id);
f20d09d5 2116 write_lock(&hci_dev_list_lock);
f48fd9c8 2117 list_del(&hdev->list);
f20d09d5 2118 write_unlock(&hci_dev_list_lock);
f48fd9c8 2119
33ca954d 2120 return error;
1da177e4
LT
2121}
2122EXPORT_SYMBOL(hci_register_dev);
2123
2124/* Unregister HCI device */
59735631 2125void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 2126{
3df92b31 2127 int i, id;
ef222013 2128
c13854ce 2129 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 2130
94324962
JH
2131 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2132
3df92b31
SL
2133 id = hdev->id;
2134
f20d09d5 2135 write_lock(&hci_dev_list_lock);
1da177e4 2136 list_del(&hdev->list);
f20d09d5 2137 write_unlock(&hci_dev_list_lock);
1da177e4
LT
2138
2139 hci_dev_do_close(hdev);
2140
cd4c5391 2141 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
2142 kfree_skb(hdev->reassembly[i]);
2143
b9b5ef18
GP
2144 cancel_work_sync(&hdev->power_on);
2145
ab81cbf9 2146 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 2147 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 2148 hci_dev_lock(hdev);
744cf19e 2149 mgmt_index_removed(hdev);
09fd0de5 2150 hci_dev_unlock(hdev);
56e5cb86 2151 }
ab81cbf9 2152
2e58ef3e
JH
2153 /* mgmt_index_removed should take care of emptying the
2154 * pending list */
2155 BUG_ON(!list_empty(&hdev->mgmt_pending));
2156
1da177e4
LT
2157 hci_notify(hdev, HCI_DEV_UNREG);
2158
611b30f7
MH
2159 if (hdev->rfkill) {
2160 rfkill_unregister(hdev->rfkill);
2161 rfkill_destroy(hdev->rfkill);
2162 }
2163
ce242970 2164 hci_del_sysfs(hdev);
147e2d59 2165
f48fd9c8 2166 destroy_workqueue(hdev->workqueue);
6ead1bbc 2167 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 2168
09fd0de5 2169 hci_dev_lock(hdev);
e2e0cacb 2170 hci_blacklist_clear(hdev);
2aeb9a1a 2171 hci_uuids_clear(hdev);
55ed8ca1 2172 hci_link_keys_clear(hdev);
b899efaf 2173 hci_smp_ltks_clear(hdev);
2763eda6 2174 hci_remote_oob_data_clear(hdev);
09fd0de5 2175 hci_dev_unlock(hdev);
e2e0cacb 2176
dc946bd8 2177 hci_dev_put(hdev);
3df92b31
SL
2178
2179 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
2180}
2181EXPORT_SYMBOL(hci_unregister_dev);
2182
2183/* Suspend HCI device */
2184int hci_suspend_dev(struct hci_dev *hdev)
2185{
2186 hci_notify(hdev, HCI_DEV_SUSPEND);
2187 return 0;
2188}
2189EXPORT_SYMBOL(hci_suspend_dev);
2190
2191/* Resume HCI device */
2192int hci_resume_dev(struct hci_dev *hdev)
2193{
2194 hci_notify(hdev, HCI_DEV_RESUME);
2195 return 0;
2196}
2197EXPORT_SYMBOL(hci_resume_dev);
2198
76bca880
MH
2199/* Receive frame from HCI drivers */
2200int hci_recv_frame(struct sk_buff *skb)
2201{
2202 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2203 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2204 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2205 kfree_skb(skb);
2206 return -ENXIO;
2207 }
2208
d82603c6 2209 /* Incoming skb */
76bca880
MH
2210 bt_cb(skb)->incoming = 1;
2211
2212 /* Time stamp */
2213 __net_timestamp(skb);
2214
76bca880 2215 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2216 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2217
76bca880
MH
2218 return 0;
2219}
2220EXPORT_SYMBOL(hci_recv_frame);
2221
33e882a5 2222static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2223 int count, __u8 index)
33e882a5
SS
2224{
2225 int len = 0;
2226 int hlen = 0;
2227 int remain = count;
2228 struct sk_buff *skb;
2229 struct bt_skb_cb *scb;
2230
2231 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2232 index >= NUM_REASSEMBLY)
33e882a5
SS
2233 return -EILSEQ;
2234
2235 skb = hdev->reassembly[index];
2236
2237 if (!skb) {
2238 switch (type) {
2239 case HCI_ACLDATA_PKT:
2240 len = HCI_MAX_FRAME_SIZE;
2241 hlen = HCI_ACL_HDR_SIZE;
2242 break;
2243 case HCI_EVENT_PKT:
2244 len = HCI_MAX_EVENT_SIZE;
2245 hlen = HCI_EVENT_HDR_SIZE;
2246 break;
2247 case HCI_SCODATA_PKT:
2248 len = HCI_MAX_SCO_SIZE;
2249 hlen = HCI_SCO_HDR_SIZE;
2250 break;
2251 }
2252
1e429f38 2253 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2254 if (!skb)
2255 return -ENOMEM;
2256
2257 scb = (void *) skb->cb;
2258 scb->expect = hlen;
2259 scb->pkt_type = type;
2260
2261 skb->dev = (void *) hdev;
2262 hdev->reassembly[index] = skb;
2263 }
2264
2265 while (count) {
2266 scb = (void *) skb->cb;
89bb46d0 2267 len = min_t(uint, scb->expect, count);
33e882a5
SS
2268
2269 memcpy(skb_put(skb, len), data, len);
2270
2271 count -= len;
2272 data += len;
2273 scb->expect -= len;
2274 remain = count;
2275
2276 switch (type) {
2277 case HCI_EVENT_PKT:
2278 if (skb->len == HCI_EVENT_HDR_SIZE) {
2279 struct hci_event_hdr *h = hci_event_hdr(skb);
2280 scb->expect = h->plen;
2281
2282 if (skb_tailroom(skb) < scb->expect) {
2283 kfree_skb(skb);
2284 hdev->reassembly[index] = NULL;
2285 return -ENOMEM;
2286 }
2287 }
2288 break;
2289
2290 case HCI_ACLDATA_PKT:
2291 if (skb->len == HCI_ACL_HDR_SIZE) {
2292 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2293 scb->expect = __le16_to_cpu(h->dlen);
2294
2295 if (skb_tailroom(skb) < scb->expect) {
2296 kfree_skb(skb);
2297 hdev->reassembly[index] = NULL;
2298 return -ENOMEM;
2299 }
2300 }
2301 break;
2302
2303 case HCI_SCODATA_PKT:
2304 if (skb->len == HCI_SCO_HDR_SIZE) {
2305 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2306 scb->expect = h->dlen;
2307
2308 if (skb_tailroom(skb) < scb->expect) {
2309 kfree_skb(skb);
2310 hdev->reassembly[index] = NULL;
2311 return -ENOMEM;
2312 }
2313 }
2314 break;
2315 }
2316
2317 if (scb->expect == 0) {
2318 /* Complete frame */
2319
2320 bt_cb(skb)->pkt_type = type;
2321 hci_recv_frame(skb);
2322
2323 hdev->reassembly[index] = NULL;
2324 return remain;
2325 }
2326 }
2327
2328 return remain;
2329}
2330
/* Feed @count bytes of a driver-delivered fragment of a @type packet into
 * the per-type reassembly machinery, looping until all input is consumed
 * or an error occurs. Returns the last hci_reassembly() result: leftover
 * byte count (normally 0) or a negative error.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		/* Reassembly slot is indexed by packet type */
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2350
/* Dedicated reassembly slot for byte-stream input (e.g. UART transports),
 * where the packet type indicator is in-band as the first byte. */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream of @count bytes. At the
 * start of each packet the first byte is consumed as the packet type;
 * while a packet is in progress the type comes from the partially built
 * skb. Returns leftover byte count or a negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2385
1da177e4
LT
2386/* ---- Interface to upper protocols ---- */
2387
/* Register an upper-protocol callback structure on the global list.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2399
/* Remove a previously registered upper-protocol callback structure.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2411
/* Hand one outgoing frame to the driver. Copies are mirrored to the
 * monitor channel and (in promiscuous mode) to raw HCI sockets before
 * the skb is orphaned and passed to hdev->send(). Returns the driver's
 * result, or -ENODEV if the skb carries no device.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2439
/* Initialize an asynchronous HCI request: empty command queue, bound to
 * @hdev. Commands are added with hci_req_add() and fired by hci_req_run().
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
}
2445
/* Submit a built-up request: mark its last command with the @complete
 * callback, splice all queued commands onto the device command queue
 * atomically and kick the command worker. Returns -EINVAL for an empty
 * request, 0 otherwise.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -EINVAL;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2469
/* Allocate and fill an skb holding one HCI command: header (opcode +
 * parameter length) followed by @plen bytes copied from @param. Returns
 * the ready-to-queue skb or NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2495
/* Send HCI command */
/* Build a stand-alone command (its own single-command request), queue it
 * on the device command queue and kick the command worker. Returns 0 or
 * -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flaged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2519
/* Queue a command to an asynchronous HCI request */
/* The first command added to an empty request is marked as the request
 * start. Commands stay on req->cmd_q until hci_req_run(). Returns 0 or
 * -ENOMEM.
 */
int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);

	return 0;
}
2541
1da177e4 2542/* Get data from the previously sent command */
a9de9248 2543void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2544{
2545 struct hci_command_hdr *hdr;
2546
2547 if (!hdev->sent_cmd)
2548 return NULL;
2549
2550 hdr = (void *) hdev->sent_cmd->data;
2551
a9de9248 2552 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2553 return NULL;
2554
f0e09510 2555 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2556
2557 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2558}
2559
/* Send ACL data */
/* Prepend an ACL header to @skb: packed connection handle + PB/BC flags,
 * and the payload length as it was before the push.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
2572
ee22be7e 2573static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2574 struct sk_buff *skb, __u16 flags)
1da177e4 2575{
ee22be7e 2576 struct hci_conn *conn = chan->conn;
1da177e4
LT
2577 struct hci_dev *hdev = conn->hdev;
2578 struct sk_buff *list;
2579
087bfd99
GP
2580 skb->len = skb_headlen(skb);
2581 skb->data_len = 0;
2582
2583 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2584
2585 switch (hdev->dev_type) {
2586 case HCI_BREDR:
2587 hci_add_acl_hdr(skb, conn->handle, flags);
2588 break;
2589 case HCI_AMP:
2590 hci_add_acl_hdr(skb, chan->handle, flags);
2591 break;
2592 default:
2593 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2594 return;
2595 }
087bfd99 2596
70f23020
AE
2597 list = skb_shinfo(skb)->frag_list;
2598 if (!list) {
1da177e4
LT
2599 /* Non fragmented */
2600 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2601
73d80deb 2602 skb_queue_tail(queue, skb);
1da177e4
LT
2603 } else {
2604 /* Fragmented */
2605 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2606
2607 skb_shinfo(skb)->frag_list = NULL;
2608
2609 /* Queue all fragments atomically */
af3e6359 2610 spin_lock(&queue->lock);
1da177e4 2611
73d80deb 2612 __skb_queue_tail(queue, skb);
e702112f
AE
2613
2614 flags &= ~ACL_START;
2615 flags |= ACL_CONT;
1da177e4
LT
2616 do {
2617 skb = list; list = list->next;
8e87d142 2618
1da177e4 2619 skb->dev = (void *) hdev;
0d48d939 2620 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2621 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2622
2623 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2624
73d80deb 2625 __skb_queue_tail(queue, skb);
1da177e4
LT
2626 } while (list);
2627
af3e6359 2628 spin_unlock(&queue->lock);
1da177e4 2629 }
73d80deb
LAD
2630}
2631
/* Queue ACL data on the channel's data queue and schedule the TX worker.
 * Takes ownership of @skb.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2644
/* Send SCO data */
/* Prepend a SCO header (handle + length), queue on the connection's data
 * queue and schedule the TX worker. Takes ownership of @skb.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2666
2667/* ---- HCI TX task (outgoing data) ---- */
2668
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest in-flight
 * packets (fair round-robin). On success *quote is set to this
 * connection's share of the relevant controller buffer budget (at least
 * 1); otherwise *quote is 0 and NULL is returned.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2729
6039aa73 2730static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2731{
2732 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2733 struct hci_conn *c;
1da177e4 2734
bae1f5d9 2735 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2736
bf4c6325
GP
2737 rcu_read_lock();
2738
1da177e4 2739 /* Kill stalled connections */
bf4c6325 2740 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2741 if (c->type == type && c->sent) {
6ed93dc6
AE
2742 BT_ERR("%s killing stalled connection %pMR",
2743 hdev->name, &c->dst);
bed71748 2744 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2745 }
2746 }
bf4c6325
GP
2747
2748 rcu_read_unlock();
1da177e4
LT
2749}
2750
/* Channel scheduler: among all channels on connections of @type, pick one
 * whose head skb has the highest priority, breaking ties by fewest
 * packets in flight on the owning connection. *quote receives the
 * channel's share of the relevant buffer budget (at least 1). Returns
 * NULL when nothing is ready to send.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness tracking
			 * at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2832
/* Anti-starvation pass after a TX round: on every connection of @type,
 * reset the per-channel sent counter for channels that transmitted, and
 * promote the head skb of channels that did not get to send up to
 * HCI_PRIO_MAX - 1 so they win the next scheduling round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel just sent: clear its counter only */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2882
/* Number of controller data blocks consumed by this ACL packet's payload
 * (header excluded), rounded up to whole blocks.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2888
6039aa73 2889static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2890{
1da177e4
LT
2891 if (!test_bit(HCI_RAW, &hdev->flags)) {
2892 /* ACL tx timeout must be longer than maximum
2893 * link supervision timeout (40.9 seconds) */
63d2bc1b 2894 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2895 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2896 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2897 }
63d2bc1b 2898}
1da177e4 2899
6039aa73 2900static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2901{
2902 unsigned int cnt = hdev->acl_cnt;
2903 struct hci_chan *chan;
2904 struct sk_buff *skb;
2905 int quote;
2906
2907 __check_timeout(hdev, cnt);
04837f64 2908
73d80deb 2909 while (hdev->acl_cnt &&
a8c5fb1a 2910 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2911 u32 priority = (skb_peek(&chan->data_q))->priority;
2912 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2913 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2914 skb->len, skb->priority);
73d80deb 2915
ec1cce24
LAD
2916 /* Stop if priority has changed */
2917 if (skb->priority < priority)
2918 break;
2919
2920 skb = skb_dequeue(&chan->data_q);
2921
73d80deb 2922 hci_conn_enter_active_mode(chan->conn,
04124681 2923 bt_cb(skb)->force_active);
04837f64 2924
1da177e4
LT
2925 hci_send_frame(skb);
2926 hdev->acl_last_tx = jiffies;
2927
2928 hdev->acl_cnt--;
73d80deb
LAD
2929 chan->sent++;
2930 chan->conn->sent++;
1da177e4
LT
2931 }
2932 }
02b20f0b
LAD
2933
2934 if (cnt != hdev->acl_cnt)
2935 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2936}
2937
6039aa73 2938static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2939{
63d2bc1b 2940 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2941 struct hci_chan *chan;
2942 struct sk_buff *skb;
2943 int quote;
bd1eb66b 2944 u8 type;
b71d385a 2945
63d2bc1b 2946 __check_timeout(hdev, cnt);
b71d385a 2947
bd1eb66b
AE
2948 BT_DBG("%s", hdev->name);
2949
2950 if (hdev->dev_type == HCI_AMP)
2951 type = AMP_LINK;
2952 else
2953 type = ACL_LINK;
2954
b71d385a 2955 while (hdev->block_cnt > 0 &&
bd1eb66b 2956 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
2957 u32 priority = (skb_peek(&chan->data_q))->priority;
2958 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2959 int blocks;
2960
2961 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2962 skb->len, skb->priority);
b71d385a
AE
2963
2964 /* Stop if priority has changed */
2965 if (skb->priority < priority)
2966 break;
2967
2968 skb = skb_dequeue(&chan->data_q);
2969
2970 blocks = __get_blocks(hdev, skb);
2971 if (blocks > hdev->block_cnt)
2972 return;
2973
2974 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2975 bt_cb(skb)->force_active);
b71d385a
AE
2976
2977 hci_send_frame(skb);
2978 hdev->acl_last_tx = jiffies;
2979
2980 hdev->block_cnt -= blocks;
2981 quote -= blocks;
2982
2983 chan->sent += blocks;
2984 chan->conn->sent += blocks;
2985 }
2986 }
2987
2988 if (cnt != hdev->block_cnt)
bd1eb66b 2989 hci_prio_recalculate(hdev, type);
b71d385a
AE
2990}
2991
6039aa73 2992static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2993{
2994 BT_DBG("%s", hdev->name);
2995
bd1eb66b
AE
2996 /* No ACL link over BR/EDR controller */
2997 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2998 return;
2999
3000 /* No AMP link over AMP controller */
3001 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3002 return;
3003
3004 switch (hdev->flow_ctl_mode) {
3005 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3006 hci_sched_acl_pkt(hdev);
3007 break;
3008
3009 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3010 hci_sched_acl_blk(hdev);
3011 break;
3012 }
3013}
3014
1da177e4 3015/* Schedule SCO */
6039aa73 3016static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3017{
3018 struct hci_conn *conn;
3019 struct sk_buff *skb;
3020 int quote;
3021
3022 BT_DBG("%s", hdev->name);
3023
52087a79
LAD
3024 if (!hci_conn_num(hdev, SCO_LINK))
3025 return;
3026
1da177e4
LT
3027 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3028 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3029 BT_DBG("skb %p len %d", skb, skb->len);
3030 hci_send_frame(skb);
3031
3032 conn->sent++;
3033 if (conn->sent == ~0)
3034 conn->sent = 0;
3035 }
3036 }
3037}
3038
6039aa73 3039static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3040{
3041 struct hci_conn *conn;
3042 struct sk_buff *skb;
3043 int quote;
3044
3045 BT_DBG("%s", hdev->name);
3046
52087a79
LAD
3047 if (!hci_conn_num(hdev, ESCO_LINK))
3048 return;
3049
8fc9ced3
GP
3050 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3051 &quote))) {
b6a0dc82
MH
3052 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3053 BT_DBG("skb %p len %d", skb, skb->len);
3054 hci_send_frame(skb);
3055
3056 conn->sent++;
3057 if (conn->sent == ~0)
3058 conn->sent = 0;
3059 }
3060 }
3061}
3062
6039aa73 3063static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3064{
73d80deb 3065 struct hci_chan *chan;
6ed58ec5 3066 struct sk_buff *skb;
02b20f0b 3067 int quote, cnt, tmp;
6ed58ec5
VT
3068
3069 BT_DBG("%s", hdev->name);
3070
52087a79
LAD
3071 if (!hci_conn_num(hdev, LE_LINK))
3072 return;
3073
6ed58ec5
VT
3074 if (!test_bit(HCI_RAW, &hdev->flags)) {
3075 /* LE tx timeout must be longer than maximum
3076 * link supervision timeout (40.9 seconds) */
bae1f5d9 3077 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3078 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3079 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3080 }
3081
3082 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3083 tmp = cnt;
73d80deb 3084 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3085 u32 priority = (skb_peek(&chan->data_q))->priority;
3086 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3087 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3088 skb->len, skb->priority);
6ed58ec5 3089
ec1cce24
LAD
3090 /* Stop if priority has changed */
3091 if (skb->priority < priority)
3092 break;
3093
3094 skb = skb_dequeue(&chan->data_q);
3095
6ed58ec5
VT
3096 hci_send_frame(skb);
3097 hdev->le_last_tx = jiffies;
3098
3099 cnt--;
73d80deb
LAD
3100 chan->sent++;
3101 chan->conn->sent++;
6ed58ec5
VT
3102 }
3103 }
73d80deb 3104
6ed58ec5
VT
3105 if (hdev->le_pkts)
3106 hdev->le_cnt = cnt;
3107 else
3108 hdev->acl_cnt = cnt;
02b20f0b
LAD
3109
3110 if (cnt != tmp)
3111 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3112}
3113
3eff45ea 3114static void hci_tx_work(struct work_struct *work)
1da177e4 3115{
3eff45ea 3116 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3117 struct sk_buff *skb;
3118
6ed58ec5 3119 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3120 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
3121
3122 /* Schedule queues and send stuff to HCI driver */
3123
3124 hci_sched_acl(hdev);
3125
3126 hci_sched_sco(hdev);
3127
b6a0dc82
MH
3128 hci_sched_esco(hdev);
3129
6ed58ec5
VT
3130 hci_sched_le(hdev);
3131
1da177e4
LT
3132 /* Send next queued raw (unknown type) packet */
3133 while ((skb = skb_dequeue(&hdev->raw_q)))
3134 hci_send_frame(skb);
1da177e4
LT
3135}
3136
25985edc 3137/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3138
/* ACL data packet */
/* RX path for ACL data: strip the ACL header, unpack handle and flags,
 * and pass the payload to L2CAP (which takes ownership of @skb). Frames
 * for unknown handles are logged and dropped.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3174
/* SCO data packet */
/* RX path for SCO data: strip the SCO header and pass the payload to the
 * SCO layer (which takes ownership of @skb). Frames for unknown handles
 * are logged and dropped.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3205
9238f36a
JH
3206static bool hci_req_is_complete(struct hci_dev *hdev)
3207{
3208 struct sk_buff *skb;
3209
3210 skb = skb_peek(&hdev->cmd_q);
3211 if (!skb)
3212 return true;
3213
3214 return bt_cb(skb)->req.start;
3215}
3216
/* Requeue a clone of the last sent command at the head of the command
 * queue and kick the worker. Used to recover when a controller generated
 * a spontaneous event that consumed a command credit. HCI_Reset is never
 * resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3238
/* Handle completion (with @status) of the command identified by @opcode.
 * If the whole request this command belonged to is now finished — last
 * command done, or any command failed — find the request's complete
 * callback (on sent_cmd, or on one of the still-queued commands), drop
 * the request's remaining queued commands, and invoke the callback.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Head of the next request: put it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3296
/* Handle a Command Status event for @opcode. A failure completes the
 * whole request immediately. A success only completes the request when
 * this was its last command and it carries a complete callback.
 */
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
3322
b78752cc 3323static void hci_rx_work(struct work_struct *work)
1da177e4 3324{
b78752cc 3325 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3326 struct sk_buff *skb;
3327
3328 BT_DBG("%s", hdev->name);
3329
1da177e4 3330 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3331 /* Send copy to monitor */
3332 hci_send_to_monitor(hdev, skb);
3333
1da177e4
LT
3334 if (atomic_read(&hdev->promisc)) {
3335 /* Send copy to the sockets */
470fe1b5 3336 hci_send_to_sock(hdev, skb);
1da177e4
LT
3337 }
3338
3339 if (test_bit(HCI_RAW, &hdev->flags)) {
3340 kfree_skb(skb);
3341 continue;
3342 }
3343
3344 if (test_bit(HCI_INIT, &hdev->flags)) {
3345 /* Don't process data packets in this states. */
0d48d939 3346 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3347 case HCI_ACLDATA_PKT:
3348 case HCI_SCODATA_PKT:
3349 kfree_skb(skb);
3350 continue;
3ff50b79 3351 }
1da177e4
LT
3352 }
3353
3354 /* Process frame */
0d48d939 3355 switch (bt_cb(skb)->pkt_type) {
1da177e4 3356 case HCI_EVENT_PKT:
b78752cc 3357 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3358 hci_event_packet(hdev, skb);
3359 break;
3360
3361 case HCI_ACLDATA_PKT:
3362 BT_DBG("%s ACL data packet", hdev->name);
3363 hci_acldata_packet(hdev, skb);
3364 break;
3365
3366 case HCI_SCODATA_PKT:
3367 BT_DBG("%s SCO data packet", hdev->name);
3368 hci_scodata_packet(hdev, skb);
3369 break;
3370
3371 default:
3372 kfree_skb(skb);
3373 break;
3374 }
3375 }
1da177e4
LT
3376}
3377
c347b765 3378static void hci_cmd_work(struct work_struct *work)
1da177e4 3379{
c347b765 3380 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3381 struct sk_buff *skb;
3382
2104786b
AE
3383 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3384 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3385
1da177e4 3386 /* Send queued commands */
5a08ecce
AE
3387 if (atomic_read(&hdev->cmd_cnt)) {
3388 skb = skb_dequeue(&hdev->cmd_q);
3389 if (!skb)
3390 return;
3391
7585b97a 3392 kfree_skb(hdev->sent_cmd);
1da177e4 3393
70f23020
AE
3394 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3395 if (hdev->sent_cmd) {
1da177e4
LT
3396 atomic_dec(&hdev->cmd_cnt);
3397 hci_send_frame(skb);
7bdb8a5c
SJ
3398 if (test_bit(HCI_RESET, &hdev->flags))
3399 del_timer(&hdev->cmd_timer);
3400 else
3401 mod_timer(&hdev->cmd_timer,
5f246e89 3402 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3403 } else {
3404 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3405 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3406 }
3407 }
3408}
2519a1fc
AG
3409
3410int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3411{
3412 /* General inquiry access code (GIAC) */
3413 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3414 struct hci_cp_inquiry cp;
3415
3416 BT_DBG("%s", hdev->name);
3417
3418 if (test_bit(HCI_INQUIRY, &hdev->flags))
3419 return -EINPROGRESS;
3420
4663262c
JH
3421 inquiry_cache_flush(hdev);
3422
2519a1fc
AG
3423 memset(&cp, 0, sizeof(cp));
3424 memcpy(&cp.lap, lap, sizeof(cp.lap));
3425 cp.length = length;
3426
3427 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3428}
023d5049
AG
3429
3430int hci_cancel_inquiry(struct hci_dev *hdev)
3431{
3432 BT_DBG("%s", hdev->name);
3433
3434 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3435 return -EALREADY;
023d5049
AG
3436
3437 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3438}
31f7956c
AG
3439
3440u8 bdaddr_to_le(u8 bdaddr_type)
3441{
3442 switch (bdaddr_type) {
3443 case BDADDR_LE_PUBLIC:
3444 return ADDR_LE_DEV_PUBLIC;
3445
3446 default:
3447 /* Fallback to LE Random address type */
3448 return ADDR_LE_DEV_RANDOM;
3449 }
3450}