Bluetooth: Fix __hci_req_sync
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
/* Work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
82/* Execute request and wait for completion. */
01178cd4 83static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
84 void (*func)(struct hci_request *req,
85 unsigned long opt),
01178cd4 86 unsigned long opt, __u32 timeout)
1da177e4 87{
42c6b129 88 struct hci_request req;
1da177e4
LT
89 DECLARE_WAITQUEUE(wait, current);
90 int err = 0;
91
92 BT_DBG("%s start", hdev->name);
93
42c6b129
JH
94 hci_req_init(&req, hdev);
95
1da177e4
LT
96 hdev->req_status = HCI_REQ_PEND;
97
42c6b129 98 func(&req, opt);
53cce22d 99
42c6b129
JH
100 err = hci_req_run(&req, hci_req_sync_complete);
101 if (err < 0) {
53cce22d 102 hdev->req_status = 0;
42c6b129
JH
103 /* req_run will fail if the request did not add any
104 * commands to the queue, something that can happen when
105 * a request with conditionals doesn't trigger any
106 * commands to be sent. This is normal behavior and
107 * should not trigger an error return.
108 */
109 return 0;
53cce22d
JH
110 }
111
bc4445c7
AG
112 add_wait_queue(&hdev->req_wait_q, &wait);
113 set_current_state(TASK_INTERRUPTIBLE);
114
1da177e4
LT
115 schedule_timeout(timeout);
116
117 remove_wait_queue(&hdev->req_wait_q, &wait);
118
119 if (signal_pending(current))
120 return -EINTR;
121
122 switch (hdev->req_status) {
123 case HCI_REQ_DONE:
e175072f 124 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
125 break;
126
127 case HCI_REQ_CANCELED:
128 err = -hdev->req_result;
129 break;
130
131 default:
132 err = -ETIMEDOUT;
133 break;
3ff50b79 134 }
1da177e4 135
a5040efa 136 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
137
138 BT_DBG("%s end: err %d", hdev->name, err);
139
140 return err;
141}
142
01178cd4 143static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
144 void (*req)(struct hci_request *req,
145 unsigned long opt),
01178cd4 146 unsigned long opt, __u32 timeout)
1da177e4
LT
147{
148 int ret;
149
7c6a329e
MH
150 if (!test_bit(HCI_UP, &hdev->flags))
151 return -ENETDOWN;
152
1da177e4
LT
153 /* Serialize all requests */
154 hci_req_lock(hdev);
01178cd4 155 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
156 hci_req_unlock(hdev);
157
158 return ret;
159}
160
42c6b129 161static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 162{
42c6b129 163 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
164
165 /* Reset device */
42c6b129
JH
166 set_bit(HCI_RESET, &req->hdev->flags);
167 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
168}
169
42c6b129 170static void bredr_init(struct hci_request *req)
1da177e4 171{
42c6b129 172 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 173
1da177e4 174 /* Read Local Supported Features */
42c6b129 175 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 176
1143e5a6 177 /* Read Local Version */
42c6b129 178 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
179
180 /* Read BD Address */
42c6b129 181 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
182}
183
42c6b129 184static void amp_init(struct hci_request *req)
e61ef499 185{
42c6b129 186 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 187
e61ef499 188 /* Read Local Version */
42c6b129 189 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
190
191 /* Read Local AMP Info */
42c6b129 192 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
193
194 /* Read Data Blk size */
42c6b129 195 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
196}
197
42c6b129 198static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 199{
42c6b129
JH
200 struct hci_dev *hdev = req->hdev;
201 struct hci_request init_req;
e61ef499
AE
202 struct sk_buff *skb;
203
204 BT_DBG("%s %ld", hdev->name, opt);
205
206 /* Driver initialization */
207
42c6b129
JH
208 hci_req_init(&init_req, hdev);
209
e61ef499
AE
210 /* Special commands */
211 while ((skb = skb_dequeue(&hdev->driver_init))) {
212 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
213 skb->dev = (void *) hdev;
214
42c6b129
JH
215 if (skb_queue_empty(&init_req.cmd_q))
216 bt_cb(skb)->req.start = true;
217
218 skb_queue_tail(&init_req.cmd_q, skb);
e61ef499
AE
219 }
220 skb_queue_purge(&hdev->driver_init);
221
42c6b129
JH
222 hci_req_run(&init_req, NULL);
223
11778716
AE
224 /* Reset */
225 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 226 hci_reset_req(req, 0);
11778716 227
e61ef499
AE
228 switch (hdev->dev_type) {
229 case HCI_BREDR:
42c6b129 230 bredr_init(req);
e61ef499
AE
231 break;
232
233 case HCI_AMP:
42c6b129 234 amp_init(req);
e61ef499
AE
235 break;
236
237 default:
238 BT_ERR("Unknown device type %d", hdev->dev_type);
239 break;
240 }
e61ef499
AE
241}
242
42c6b129 243static void bredr_setup(struct hci_request *req)
2177bab5
JH
244{
245 struct hci_cp_delete_stored_link_key cp;
246 __le16 param;
247 __u8 flt_type;
248
249 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 250 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
251
252 /* Read Class of Device */
42c6b129 253 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
254
255 /* Read Local Name */
42c6b129 256 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
257
258 /* Read Voice Setting */
42c6b129 259 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5
JH
260
261 /* Clear Event Filters */
262 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 263 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
264
265 /* Connection accept timeout ~20 secs */
266 param = __constant_cpu_to_le16(0x7d00);
42c6b129 267 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5
JH
268
269 bacpy(&cp.bdaddr, BDADDR_ANY);
270 cp.delete_all = 0x01;
42c6b129 271 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
2177bab5
JH
272}
273
42c6b129 274static void le_setup(struct hci_request *req)
2177bab5
JH
275{
276 /* Read LE Buffer Size */
42c6b129 277 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
278
279 /* Read LE Local Supported Features */
42c6b129 280 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
281
282 /* Read LE Advertising Channel TX Power */
42c6b129 283 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
284
285 /* Read LE White List Size */
42c6b129 286 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
287
288 /* Read LE Supported States */
42c6b129 289 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
2177bab5
JH
290}
291
292static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
293{
294 if (lmp_ext_inq_capable(hdev))
295 return 0x02;
296
297 if (lmp_inq_rssi_capable(hdev))
298 return 0x01;
299
300 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
301 hdev->lmp_subver == 0x0757)
302 return 0x01;
303
304 if (hdev->manufacturer == 15) {
305 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
306 return 0x01;
307 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
308 return 0x01;
309 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
310 return 0x01;
311 }
312
313 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
314 hdev->lmp_subver == 0x1805)
315 return 0x01;
316
317 return 0x00;
318}
319
42c6b129 320static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
321{
322 u8 mode;
323
42c6b129 324 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 325
42c6b129 326 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
327}
328
42c6b129 329static void hci_setup_event_mask(struct hci_request *req)
2177bab5 330{
42c6b129
JH
331 struct hci_dev *hdev = req->hdev;
332
2177bab5
JH
333 /* The second byte is 0xff instead of 0x9f (two reserved bits
334 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
335 * command otherwise.
336 */
337 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
338
339 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
340 * any event mask for pre 1.2 devices.
341 */
342 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
343 return;
344
345 if (lmp_bredr_capable(hdev)) {
346 events[4] |= 0x01; /* Flow Specification Complete */
347 events[4] |= 0x02; /* Inquiry Result with RSSI */
348 events[4] |= 0x04; /* Read Remote Extended Features Complete */
349 events[5] |= 0x08; /* Synchronous Connection Complete */
350 events[5] |= 0x10; /* Synchronous Connection Changed */
351 }
352
353 if (lmp_inq_rssi_capable(hdev))
354 events[4] |= 0x02; /* Inquiry Result with RSSI */
355
356 if (lmp_sniffsubr_capable(hdev))
357 events[5] |= 0x20; /* Sniff Subrating */
358
359 if (lmp_pause_enc_capable(hdev))
360 events[5] |= 0x80; /* Encryption Key Refresh Complete */
361
362 if (lmp_ext_inq_capable(hdev))
363 events[5] |= 0x40; /* Extended Inquiry Result */
364
365 if (lmp_no_flush_capable(hdev))
366 events[7] |= 0x01; /* Enhanced Flush Complete */
367
368 if (lmp_lsto_capable(hdev))
369 events[6] |= 0x80; /* Link Supervision Timeout Changed */
370
371 if (lmp_ssp_capable(hdev)) {
372 events[6] |= 0x01; /* IO Capability Request */
373 events[6] |= 0x02; /* IO Capability Response */
374 events[6] |= 0x04; /* User Confirmation Request */
375 events[6] |= 0x08; /* User Passkey Request */
376 events[6] |= 0x10; /* Remote OOB Data Request */
377 events[6] |= 0x20; /* Simple Pairing Complete */
378 events[7] |= 0x04; /* User Passkey Notification */
379 events[7] |= 0x08; /* Keypress Notification */
380 events[7] |= 0x10; /* Remote Host Supported
381 * Features Notification
382 */
383 }
384
385 if (lmp_le_capable(hdev))
386 events[7] |= 0x20; /* LE Meta-Event */
387
42c6b129 388 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
389
390 if (lmp_le_capable(hdev)) {
391 memset(events, 0, sizeof(events));
392 events[0] = 0x1f;
42c6b129
JH
393 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
394 sizeof(events), events);
2177bab5
JH
395 }
396}
397
42c6b129 398static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 399{
42c6b129
JH
400 struct hci_dev *hdev = req->hdev;
401
2177bab5 402 if (lmp_bredr_capable(hdev))
42c6b129 403 bredr_setup(req);
2177bab5
JH
404
405 if (lmp_le_capable(hdev))
42c6b129 406 le_setup(req);
2177bab5 407
42c6b129 408 hci_setup_event_mask(req);
2177bab5
JH
409
410 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 411 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
412
413 if (lmp_ssp_capable(hdev)) {
414 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
415 u8 mode = 0x01;
42c6b129
JH
416 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
417 sizeof(mode), &mode);
2177bab5
JH
418 } else {
419 struct hci_cp_write_eir cp;
420
421 memset(hdev->eir, 0, sizeof(hdev->eir));
422 memset(&cp, 0, sizeof(cp));
423
42c6b129 424 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
425 }
426 }
427
428 if (lmp_inq_rssi_capable(hdev))
42c6b129 429 hci_setup_inquiry_mode(req);
2177bab5
JH
430
431 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 432 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
433
434 if (lmp_ext_feat_capable(hdev)) {
435 struct hci_cp_read_local_ext_features cp;
436
437 cp.page = 0x01;
42c6b129
JH
438 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
439 sizeof(cp), &cp);
2177bab5
JH
440 }
441
442 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
443 u8 enable = 1;
42c6b129
JH
444 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
445 &enable);
2177bab5
JH
446 }
447}
448
42c6b129 449static void hci_setup_link_policy(struct hci_request *req)
2177bab5 450{
42c6b129 451 struct hci_dev *hdev = req->hdev;
2177bab5
JH
452 struct hci_cp_write_def_link_policy cp;
453 u16 link_policy = 0;
454
455 if (lmp_rswitch_capable(hdev))
456 link_policy |= HCI_LP_RSWITCH;
457 if (lmp_hold_capable(hdev))
458 link_policy |= HCI_LP_HOLD;
459 if (lmp_sniff_capable(hdev))
460 link_policy |= HCI_LP_SNIFF;
461 if (lmp_park_capable(hdev))
462 link_policy |= HCI_LP_PARK;
463
464 cp.policy = cpu_to_le16(link_policy);
42c6b129 465 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
466}
467
42c6b129 468static void hci_set_le_support(struct hci_request *req)
2177bab5 469{
42c6b129 470 struct hci_dev *hdev = req->hdev;
2177bab5
JH
471 struct hci_cp_write_le_host_supported cp;
472
473 memset(&cp, 0, sizeof(cp));
474
475 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
476 cp.le = 0x01;
477 cp.simul = lmp_le_br_capable(hdev);
478 }
479
480 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
481 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
482 &cp);
2177bab5
JH
483}
484
42c6b129 485static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 486{
42c6b129
JH
487 struct hci_dev *hdev = req->hdev;
488
2177bab5 489 if (hdev->commands[5] & 0x10)
42c6b129 490 hci_setup_link_policy(req);
2177bab5
JH
491
492 if (lmp_le_capable(hdev))
42c6b129 493 hci_set_le_support(req);
2177bab5
JH
494}
495
496static int __hci_init(struct hci_dev *hdev)
497{
498 int err;
499
500 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
501 if (err < 0)
502 return err;
503
504 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
505 * BR/EDR/LE type controllers. AMP controllers only need the
506 * first stage init.
507 */
508 if (hdev->dev_type != HCI_BREDR)
509 return 0;
510
511 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
512 if (err < 0)
513 return err;
514
515 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
516}
517
42c6b129 518static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
519{
520 __u8 scan = opt;
521
42c6b129 522 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
523
524 /* Inquiry and Page scans */
42c6b129 525 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
526}
527
42c6b129 528static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
529{
530 __u8 auth = opt;
531
42c6b129 532 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
533
534 /* Authentication */
42c6b129 535 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
536}
537
42c6b129 538static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
539{
540 __u8 encrypt = opt;
541
42c6b129 542 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 543
e4e8e37c 544 /* Encryption */
42c6b129 545 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
546}
547
42c6b129 548static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
549{
550 __le16 policy = cpu_to_le16(opt);
551
42c6b129 552 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
553
554 /* Default link policy */
42c6b129 555 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
556}
557
8e87d142 558/* Get HCI device by index.
1da177e4
LT
559 * Device is held on return. */
560struct hci_dev *hci_dev_get(int index)
561{
8035ded4 562 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
563
564 BT_DBG("%d", index);
565
566 if (index < 0)
567 return NULL;
568
569 read_lock(&hci_dev_list_lock);
8035ded4 570 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
571 if (d->id == index) {
572 hdev = hci_dev_hold(d);
573 break;
574 }
575 }
576 read_unlock(&hci_dev_list_lock);
577 return hdev;
578}
1da177e4
LT
579
580/* ---- Inquiry support ---- */
ff9ef578 581
30dc78e1
JH
582bool hci_discovery_active(struct hci_dev *hdev)
583{
584 struct discovery_state *discov = &hdev->discovery;
585
6fbe195d 586 switch (discov->state) {
343f935b 587 case DISCOVERY_FINDING:
6fbe195d 588 case DISCOVERY_RESOLVING:
30dc78e1
JH
589 return true;
590
6fbe195d
AG
591 default:
592 return false;
593 }
30dc78e1
JH
594}
595
ff9ef578
JH
596void hci_discovery_set_state(struct hci_dev *hdev, int state)
597{
598 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
599
600 if (hdev->discovery.state == state)
601 return;
602
603 switch (state) {
604 case DISCOVERY_STOPPED:
7b99b659
AG
605 if (hdev->discovery.state != DISCOVERY_STARTING)
606 mgmt_discovering(hdev, 0);
ff9ef578
JH
607 break;
608 case DISCOVERY_STARTING:
609 break;
343f935b 610 case DISCOVERY_FINDING:
ff9ef578
JH
611 mgmt_discovering(hdev, 1);
612 break;
30dc78e1
JH
613 case DISCOVERY_RESOLVING:
614 break;
ff9ef578
JH
615 case DISCOVERY_STOPPING:
616 break;
617 }
618
619 hdev->discovery.state = state;
620}
621
1da177e4
LT
622static void inquiry_cache_flush(struct hci_dev *hdev)
623{
30883512 624 struct discovery_state *cache = &hdev->discovery;
b57c1a56 625 struct inquiry_entry *p, *n;
1da177e4 626
561aafbc
JH
627 list_for_each_entry_safe(p, n, &cache->all, all) {
628 list_del(&p->all);
b57c1a56 629 kfree(p);
1da177e4 630 }
561aafbc
JH
631
632 INIT_LIST_HEAD(&cache->unknown);
633 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
634}
635
a8c5fb1a
GP
636struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
637 bdaddr_t *bdaddr)
1da177e4 638{
30883512 639 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
640 struct inquiry_entry *e;
641
6ed93dc6 642 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 643
561aafbc
JH
644 list_for_each_entry(e, &cache->all, all) {
645 if (!bacmp(&e->data.bdaddr, bdaddr))
646 return e;
647 }
648
649 return NULL;
650}
651
652struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 653 bdaddr_t *bdaddr)
561aafbc 654{
30883512 655 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
656 struct inquiry_entry *e;
657
6ed93dc6 658 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
659
660 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 661 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
662 return e;
663 }
664
665 return NULL;
1da177e4
LT
666}
667
30dc78e1 668struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
669 bdaddr_t *bdaddr,
670 int state)
30dc78e1
JH
671{
672 struct discovery_state *cache = &hdev->discovery;
673 struct inquiry_entry *e;
674
6ed93dc6 675 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
676
677 list_for_each_entry(e, &cache->resolve, list) {
678 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
679 return e;
680 if (!bacmp(&e->data.bdaddr, bdaddr))
681 return e;
682 }
683
684 return NULL;
685}
686
a3d4e20a 687void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 688 struct inquiry_entry *ie)
a3d4e20a
JH
689{
690 struct discovery_state *cache = &hdev->discovery;
691 struct list_head *pos = &cache->resolve;
692 struct inquiry_entry *p;
693
694 list_del(&ie->list);
695
696 list_for_each_entry(p, &cache->resolve, list) {
697 if (p->name_state != NAME_PENDING &&
a8c5fb1a 698 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
699 break;
700 pos = &p->list;
701 }
702
703 list_add(&ie->list, pos);
704}
705
3175405b 706bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 707 bool name_known, bool *ssp)
1da177e4 708{
30883512 709 struct discovery_state *cache = &hdev->discovery;
70f23020 710 struct inquiry_entry *ie;
1da177e4 711
6ed93dc6 712 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 713
2b2fec4d
SJ
714 hci_remove_remote_oob_data(hdev, &data->bdaddr);
715
388fc8fa
JH
716 if (ssp)
717 *ssp = data->ssp_mode;
718
70f23020 719 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 720 if (ie) {
388fc8fa
JH
721 if (ie->data.ssp_mode && ssp)
722 *ssp = true;
723
a3d4e20a 724 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 725 data->rssi != ie->data.rssi) {
a3d4e20a
JH
726 ie->data.rssi = data->rssi;
727 hci_inquiry_cache_update_resolve(hdev, ie);
728 }
729
561aafbc 730 goto update;
a3d4e20a 731 }
561aafbc
JH
732
733 /* Entry not in the cache. Add new one. */
734 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
735 if (!ie)
3175405b 736 return false;
561aafbc
JH
737
738 list_add(&ie->all, &cache->all);
739
740 if (name_known) {
741 ie->name_state = NAME_KNOWN;
742 } else {
743 ie->name_state = NAME_NOT_KNOWN;
744 list_add(&ie->list, &cache->unknown);
745 }
70f23020 746
561aafbc
JH
747update:
748 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 749 ie->name_state != NAME_PENDING) {
561aafbc
JH
750 ie->name_state = NAME_KNOWN;
751 list_del(&ie->list);
1da177e4
LT
752 }
753
70f23020
AE
754 memcpy(&ie->data, data, sizeof(*data));
755 ie->timestamp = jiffies;
1da177e4 756 cache->timestamp = jiffies;
3175405b
JH
757
758 if (ie->name_state == NAME_NOT_KNOWN)
759 return false;
760
761 return true;
1da177e4
LT
762}
763
764static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
765{
30883512 766 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
767 struct inquiry_info *info = (struct inquiry_info *) buf;
768 struct inquiry_entry *e;
769 int copied = 0;
770
561aafbc 771 list_for_each_entry(e, &cache->all, all) {
1da177e4 772 struct inquiry_data *data = &e->data;
b57c1a56
JH
773
774 if (copied >= num)
775 break;
776
1da177e4
LT
777 bacpy(&info->bdaddr, &data->bdaddr);
778 info->pscan_rep_mode = data->pscan_rep_mode;
779 info->pscan_period_mode = data->pscan_period_mode;
780 info->pscan_mode = data->pscan_mode;
781 memcpy(info->dev_class, data->dev_class, 3);
782 info->clock_offset = data->clock_offset;
b57c1a56 783
1da177e4 784 info++;
b57c1a56 785 copied++;
1da177e4
LT
786 }
787
788 BT_DBG("cache %p, copied %d", cache, copied);
789 return copied;
790}
791
42c6b129 792static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
793{
794 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 795 struct hci_dev *hdev = req->hdev;
1da177e4
LT
796 struct hci_cp_inquiry cp;
797
798 BT_DBG("%s", hdev->name);
799
800 if (test_bit(HCI_INQUIRY, &hdev->flags))
801 return;
802
803 /* Start Inquiry */
804 memcpy(&cp.lap, &ir->lap, 3);
805 cp.length = ir->length;
806 cp.num_rsp = ir->num_rsp;
42c6b129 807 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
808}
809
810int hci_inquiry(void __user *arg)
811{
812 __u8 __user *ptr = arg;
813 struct hci_inquiry_req ir;
814 struct hci_dev *hdev;
815 int err = 0, do_inquiry = 0, max_rsp;
816 long timeo;
817 __u8 *buf;
818
819 if (copy_from_user(&ir, ptr, sizeof(ir)))
820 return -EFAULT;
821
5a08ecce
AE
822 hdev = hci_dev_get(ir.dev_id);
823 if (!hdev)
1da177e4
LT
824 return -ENODEV;
825
09fd0de5 826 hci_dev_lock(hdev);
8e87d142 827 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 828 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
829 inquiry_cache_flush(hdev);
830 do_inquiry = 1;
831 }
09fd0de5 832 hci_dev_unlock(hdev);
1da177e4 833
04837f64 834 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
835
836 if (do_inquiry) {
01178cd4
JH
837 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
838 timeo);
70f23020
AE
839 if (err < 0)
840 goto done;
841 }
1da177e4 842
8fc9ced3
GP
843 /* for unlimited number of responses we will use buffer with
844 * 255 entries
845 */
1da177e4
LT
846 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
847
848 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
849 * copy it to the user space.
850 */
01df8c31 851 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 852 if (!buf) {
1da177e4
LT
853 err = -ENOMEM;
854 goto done;
855 }
856
09fd0de5 857 hci_dev_lock(hdev);
1da177e4 858 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 859 hci_dev_unlock(hdev);
1da177e4
LT
860
861 BT_DBG("num_rsp %d", ir.num_rsp);
862
863 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
864 ptr += sizeof(ir);
865 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 866 ir.num_rsp))
1da177e4 867 err = -EFAULT;
8e87d142 868 } else
1da177e4
LT
869 err = -EFAULT;
870
871 kfree(buf);
872
873done:
874 hci_dev_put(hdev);
875 return err;
876}
877
3f0f524b
JH
878static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
879{
880 u8 ad_len = 0, flags = 0;
881 size_t name_len;
882
883 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
884 flags |= LE_AD_GENERAL;
885
886 if (!lmp_bredr_capable(hdev))
887 flags |= LE_AD_NO_BREDR;
888
889 if (lmp_le_br_capable(hdev))
890 flags |= LE_AD_SIM_LE_BREDR_CTRL;
891
892 if (lmp_host_le_br_capable(hdev))
893 flags |= LE_AD_SIM_LE_BREDR_HOST;
894
895 if (flags) {
896 BT_DBG("adv flags 0x%02x", flags);
897
898 ptr[0] = 2;
899 ptr[1] = EIR_FLAGS;
900 ptr[2] = flags;
901
902 ad_len += 3;
903 ptr += 3;
904 }
905
906 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
907 ptr[0] = 2;
908 ptr[1] = EIR_TX_POWER;
909 ptr[2] = (u8) hdev->adv_tx_power;
910
911 ad_len += 3;
912 ptr += 3;
913 }
914
915 name_len = strlen(hdev->dev_name);
916 if (name_len > 0) {
917 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
918
919 if (name_len > max_len) {
920 name_len = max_len;
921 ptr[1] = EIR_NAME_SHORT;
922 } else
923 ptr[1] = EIR_NAME_COMPLETE;
924
925 ptr[0] = name_len + 1;
926
927 memcpy(ptr + 2, hdev->dev_name, name_len);
928
929 ad_len += (name_len + 2);
930 ptr += (name_len + 2);
931 }
932
933 return ad_len;
934}
935
936int hci_update_ad(struct hci_dev *hdev)
937{
938 struct hci_cp_le_set_adv_data cp;
939 u8 len;
940 int err;
941
942 hci_dev_lock(hdev);
943
944 if (!lmp_le_capable(hdev)) {
945 err = -EINVAL;
946 goto unlock;
947 }
948
949 memset(&cp, 0, sizeof(cp));
950
951 len = create_ad(hdev, cp.data);
952
953 if (hdev->adv_data_len == len &&
954 memcmp(cp.data, hdev->adv_data, len) == 0) {
955 err = 0;
956 goto unlock;
957 }
958
959 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
960 hdev->adv_data_len = len;
961
962 cp.length = len;
963 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
964
965unlock:
966 hci_dev_unlock(hdev);
967
968 return err;
969}
970
1da177e4
LT
971/* ---- HCI ioctl helpers ---- */
972
973int hci_dev_open(__u16 dev)
974{
975 struct hci_dev *hdev;
976 int ret = 0;
977
5a08ecce
AE
978 hdev = hci_dev_get(dev);
979 if (!hdev)
1da177e4
LT
980 return -ENODEV;
981
982 BT_DBG("%s %p", hdev->name, hdev);
983
984 hci_req_lock(hdev);
985
94324962
JH
986 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
987 ret = -ENODEV;
988 goto done;
989 }
990
611b30f7
MH
991 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
992 ret = -ERFKILL;
993 goto done;
994 }
995
1da177e4
LT
996 if (test_bit(HCI_UP, &hdev->flags)) {
997 ret = -EALREADY;
998 goto done;
999 }
1000
1001 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1002 set_bit(HCI_RAW, &hdev->flags);
1003
07e3b94a
AE
1004 /* Treat all non BR/EDR controllers as raw devices if
1005 enable_hs is not set */
1006 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
1007 set_bit(HCI_RAW, &hdev->flags);
1008
1da177e4
LT
1009 if (hdev->open(hdev)) {
1010 ret = -EIO;
1011 goto done;
1012 }
1013
1014 if (!test_bit(HCI_RAW, &hdev->flags)) {
1015 atomic_set(&hdev->cmd_cnt, 1);
1016 set_bit(HCI_INIT, &hdev->flags);
2177bab5 1017 ret = __hci_init(hdev);
1da177e4
LT
1018 clear_bit(HCI_INIT, &hdev->flags);
1019 }
1020
1021 if (!ret) {
1022 hci_dev_hold(hdev);
1023 set_bit(HCI_UP, &hdev->flags);
1024 hci_notify(hdev, HCI_DEV_UP);
3f0f524b 1025 hci_update_ad(hdev);
bb4b2a9a
AE
1026 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1027 mgmt_valid_hdev(hdev)) {
09fd0de5 1028 hci_dev_lock(hdev);
744cf19e 1029 mgmt_powered(hdev, 1);
09fd0de5 1030 hci_dev_unlock(hdev);
56e5cb86 1031 }
8e87d142 1032 } else {
1da177e4 1033 /* Init failed, cleanup */
3eff45ea 1034 flush_work(&hdev->tx_work);
c347b765 1035 flush_work(&hdev->cmd_work);
b78752cc 1036 flush_work(&hdev->rx_work);
1da177e4
LT
1037
1038 skb_queue_purge(&hdev->cmd_q);
1039 skb_queue_purge(&hdev->rx_q);
1040
1041 if (hdev->flush)
1042 hdev->flush(hdev);
1043
1044 if (hdev->sent_cmd) {
1045 kfree_skb(hdev->sent_cmd);
1046 hdev->sent_cmd = NULL;
1047 }
1048
1049 hdev->close(hdev);
1050 hdev->flags = 0;
1051 }
1052
1053done:
1054 hci_req_unlock(hdev);
1055 hci_dev_put(hdev);
1056 return ret;
1057}
1058
/* Bring the controller all the way down: cancel pending work, flush
 * queues and connections, optionally reset the hardware, run the
 * driver close callback and notify the management interface.
 * Returns 0, also when the device was already down.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device already down: just stop the command timer and bail */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Pending discoverable timeout is moot once the device is down */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Only report the power change for devices mgmt knows about,
	 * and not for an automatic power-off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1151
1152int hci_dev_close(__u16 dev)
1153{
1154 struct hci_dev *hdev;
1155 int err;
1156
70f23020
AE
1157 hdev = hci_dev_get(dev);
1158 if (!hdev)
1da177e4 1159 return -ENODEV;
8ee56540
MH
1160
1161 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1162 cancel_delayed_work(&hdev->power_off);
1163
1da177e4 1164 err = hci_dev_do_close(hdev);
8ee56540 1165
1da177e4
LT
1166 hci_dev_put(hdev);
1167 return err;
1168}
1169
/* HCIDEVRESET ioctl backend: flush queues and connections and issue an
 * HCI Reset without tearing the interface down.
 * Returns 0, -ENODEV, or the __hci_req_sync() error.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset on a device that is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters to their idle state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* Raw devices are driven entirely by user space; do not send
	 * the reset command for them */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1207
1208int hci_dev_reset_stat(__u16 dev)
1209{
1210 struct hci_dev *hdev;
1211 int ret = 0;
1212
70f23020
AE
1213 hdev = hci_dev_get(dev);
1214 if (!hdev)
1da177e4
LT
1215 return -ENODEV;
1216
1217 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1218
1219 hci_dev_put(hdev);
1220
1221 return ret;
1222}
1223
/* Legacy per-device HCI ioctl dispatcher (HCISET* commands).
 * @cmd selects the setting, @arg points to a struct hci_dev_req.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are settable this way */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: MTU in the second
		 * half-word, packet count in the first */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1298
/* HCIGETDEVLIST ioctl backend: copy up to dev_num (id, flags) pairs
 * describing the registered controllers out to user space.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation below to a sane size */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing the devices cancels a pending auto-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) user space expects devices to be
		 * pairable by default */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy out the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1345
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for one
 * controller. Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device cancels a pending auto-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) user space expects devices to be pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings through
	 * the ACL fields; SCO fields are meaningless for them */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1394
1395/* ---- Interface to HCI drivers ---- */
1396
611b30f7
MH
1397static int hci_rfkill_set_block(void *data, bool blocked)
1398{
1399 struct hci_dev *hdev = data;
1400
1401 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1402
1403 if (!blocked)
1404 return 0;
1405
1406 hci_dev_do_close(hdev);
1407
1408 return 0;
1409}
1410
/* rfkill integration: only the block operation is implemented */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1414
ab81cbf9
JH
/* Work item that powers the controller up (scheduled at registration
 * time and by the management interface).
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* If this was only an automatic power-on, arm the timer that
	 * powers the device back off when nobody claims it */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	/* First successful power-on finishes setup: announce the index */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1431
1432static void hci_power_off(struct work_struct *work)
1433{
3243553f 1434 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1435 power_off.work);
ab81cbf9
JH
1436
1437 BT_DBG("%s", hdev->name);
1438
8ee56540 1439 hci_dev_do_close(hdev);
ab81cbf9
JH
1440}
1441
16ab91ab
JH
/* Delayed work: the discoverable timeout expired, so drop inquiry scan
 * and leave only page scan enabled.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1459
2aeb9a1a
JH
1460int hci_uuids_clear(struct hci_dev *hdev)
1461{
4821002c 1462 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1463
4821002c
JH
1464 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1465 list_del(&uuid->list);
2aeb9a1a
JH
1466 kfree(uuid);
1467 }
1468
1469 return 0;
1470}
1471
55ed8ca1
JH
1472int hci_link_keys_clear(struct hci_dev *hdev)
1473{
1474 struct list_head *p, *n;
1475
1476 list_for_each_safe(p, n, &hdev->link_keys) {
1477 struct link_key *key;
1478
1479 key = list_entry(p, struct link_key, list);
1480
1481 list_del(p);
1482 kfree(key);
1483 }
1484
1485 return 0;
1486}
1487
b899efaf
VCG
1488int hci_smp_ltks_clear(struct hci_dev *hdev)
1489{
1490 struct smp_ltk *k, *tmp;
1491
1492 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1493 list_del(&k->list);
1494 kfree(k);
1495 }
1496
1497 return 0;
1498}
1499
55ed8ca1
JH
1500struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1501{
8035ded4 1502 struct link_key *k;
55ed8ca1 1503
8035ded4 1504 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1505 if (bacmp(bdaddr, &k->bdaddr) == 0)
1506 return k;
55ed8ca1
JH
1507
1508 return NULL;
1509}
1510
745c0ce3 1511static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1512 u8 key_type, u8 old_key_type)
d25e28ab
JH
1513{
1514 /* Legacy key */
1515 if (key_type < 0x03)
745c0ce3 1516 return true;
d25e28ab
JH
1517
1518 /* Debug keys are insecure so don't store them persistently */
1519 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1520 return false;
d25e28ab
JH
1521
1522 /* Changed combination key and there's no previous one */
1523 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1524 return false;
d25e28ab
JH
1525
1526 /* Security mode 3 case */
1527 if (!conn)
745c0ce3 1528 return true;
d25e28ab
JH
1529
1530 /* Neither local nor remote side had no-bonding as requirement */
1531 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1532 return true;
d25e28ab
JH
1533
1534 /* Local side had dedicated bonding as requirement */
1535 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1536 return true;
d25e28ab
JH
1537
1538 /* Remote side had dedicated bonding as requirement */
1539 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1540 return true;
d25e28ab
JH
1541
1542 /* If none of the above criteria match, then don't store the key
1543 * persistently */
745c0ce3 1544 return false;
d25e28ab
JH
1545}
1546
c9839a11 1547struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1548{
c9839a11 1549 struct smp_ltk *k;
75d262c2 1550
c9839a11
VCG
1551 list_for_each_entry(k, &hdev->long_term_keys, list) {
1552 if (k->ediv != ediv ||
a8c5fb1a 1553 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1554 continue;
1555
c9839a11 1556 return k;
75d262c2
VCG
1557 }
1558
1559 return NULL;
1560}
75d262c2 1561
c9839a11 1562struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1563 u8 addr_type)
75d262c2 1564{
c9839a11 1565 struct smp_ltk *k;
75d262c2 1566
c9839a11
VCG
1567 list_for_each_entry(k, &hdev->long_term_keys, list)
1568 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1569 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1570 return k;
1571
1572 return NULL;
1573}
75d262c2 1574
d25e28ab 1575int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1576 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1577{
1578 struct link_key *key, *old_key;
745c0ce3
VA
1579 u8 old_key_type;
1580 bool persistent;
55ed8ca1
JH
1581
1582 old_key = hci_find_link_key(hdev, bdaddr);
1583 if (old_key) {
1584 old_key_type = old_key->type;
1585 key = old_key;
1586 } else {
12adcf3a 1587 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1588 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1589 if (!key)
1590 return -ENOMEM;
1591 list_add(&key->list, &hdev->link_keys);
1592 }
1593
6ed93dc6 1594 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1595
d25e28ab
JH
1596 /* Some buggy controller combinations generate a changed
1597 * combination key for legacy pairing even when there's no
1598 * previous key */
1599 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1600 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1601 type = HCI_LK_COMBINATION;
655fe6ec
JH
1602 if (conn)
1603 conn->key_type = type;
1604 }
d25e28ab 1605
55ed8ca1 1606 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1607 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1608 key->pin_len = pin_len;
1609
b6020ba0 1610 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1611 key->type = old_key_type;
4748fed2
JH
1612 else
1613 key->type = type;
1614
4df378a1
JH
1615 if (!new_key)
1616 return 0;
1617
1618 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1619
744cf19e 1620 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1621
6ec5bcad
VA
1622 if (conn)
1623 conn->flush_key = !persistent;
55ed8ca1
JH
1624
1625 return 0;
1626}
1627
c9839a11 1628int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1629 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1630 ediv, u8 rand[8])
75d262c2 1631{
c9839a11 1632 struct smp_ltk *key, *old_key;
75d262c2 1633
c9839a11
VCG
1634 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1635 return 0;
75d262c2 1636
c9839a11
VCG
1637 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1638 if (old_key)
75d262c2 1639 key = old_key;
c9839a11
VCG
1640 else {
1641 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1642 if (!key)
1643 return -ENOMEM;
c9839a11 1644 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1645 }
1646
75d262c2 1647 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1648 key->bdaddr_type = addr_type;
1649 memcpy(key->val, tk, sizeof(key->val));
1650 key->authenticated = authenticated;
1651 key->ediv = ediv;
1652 key->enc_size = enc_size;
1653 key->type = type;
1654 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1655
c9839a11
VCG
1656 if (!new_key)
1657 return 0;
75d262c2 1658
261cc5aa
VCG
1659 if (type & HCI_SMP_LTK)
1660 mgmt_new_ltk(hdev, key, 1);
1661
75d262c2
VCG
1662 return 0;
1663}
1664
55ed8ca1
JH
1665int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1666{
1667 struct link_key *key;
1668
1669 key = hci_find_link_key(hdev, bdaddr);
1670 if (!key)
1671 return -ENOENT;
1672
6ed93dc6 1673 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1674
1675 list_del(&key->list);
1676 kfree(key);
1677
1678 return 0;
1679}
1680
b899efaf
VCG
1681int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1682{
1683 struct smp_ltk *k, *tmp;
1684
1685 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1686 if (bacmp(bdaddr, &k->bdaddr))
1687 continue;
1688
6ed93dc6 1689 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1690
1691 list_del(&k->list);
1692 kfree(k);
1693 }
1694
1695 return 0;
1696}
1697
6bd32326 1698/* HCI command timer function */
bda4f23a 1699static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1700{
1701 struct hci_dev *hdev = (void *) arg;
1702
bda4f23a
AE
1703 if (hdev->sent_cmd) {
1704 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1705 u16 opcode = __le16_to_cpu(sent->opcode);
1706
1707 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1708 } else {
1709 BT_ERR("%s command tx timeout", hdev->name);
1710 }
1711
6bd32326 1712 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1713 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1714}
1715
2763eda6 1716struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1717 bdaddr_t *bdaddr)
2763eda6
SJ
1718{
1719 struct oob_data *data;
1720
1721 list_for_each_entry(data, &hdev->remote_oob_data, list)
1722 if (bacmp(bdaddr, &data->bdaddr) == 0)
1723 return data;
1724
1725 return NULL;
1726}
1727
1728int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1729{
1730 struct oob_data *data;
1731
1732 data = hci_find_remote_oob_data(hdev, bdaddr);
1733 if (!data)
1734 return -ENOENT;
1735
6ed93dc6 1736 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1737
1738 list_del(&data->list);
1739 kfree(data);
1740
1741 return 0;
1742}
1743
1744int hci_remote_oob_data_clear(struct hci_dev *hdev)
1745{
1746 struct oob_data *data, *n;
1747
1748 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1749 list_del(&data->list);
1750 kfree(data);
1751 }
1752
1753 return 0;
1754}
1755
1756int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1757 u8 *randomizer)
2763eda6
SJ
1758{
1759 struct oob_data *data;
1760
1761 data = hci_find_remote_oob_data(hdev, bdaddr);
1762
1763 if (!data) {
1764 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1765 if (!data)
1766 return -ENOMEM;
1767
1768 bacpy(&data->bdaddr, bdaddr);
1769 list_add(&data->list, &hdev->remote_oob_data);
1770 }
1771
1772 memcpy(data->hash, hash, sizeof(data->hash));
1773 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1774
6ed93dc6 1775 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1776
1777 return 0;
1778}
1779
04124681 1780struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1781{
8035ded4 1782 struct bdaddr_list *b;
b2a66aad 1783
8035ded4 1784 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1785 if (bacmp(bdaddr, &b->bdaddr) == 0)
1786 return b;
b2a66aad
AJ
1787
1788 return NULL;
1789}
1790
1791int hci_blacklist_clear(struct hci_dev *hdev)
1792{
1793 struct list_head *p, *n;
1794
1795 list_for_each_safe(p, n, &hdev->blacklist) {
1796 struct bdaddr_list *b;
1797
1798 b = list_entry(p, struct bdaddr_list, list);
1799
1800 list_del(p);
1801 kfree(b);
1802 }
1803
1804 return 0;
1805}
1806
88c1fe4b 1807int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1808{
1809 struct bdaddr_list *entry;
b2a66aad
AJ
1810
1811 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1812 return -EBADF;
1813
5e762444
AJ
1814 if (hci_blacklist_lookup(hdev, bdaddr))
1815 return -EEXIST;
b2a66aad
AJ
1816
1817 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1818 if (!entry)
1819 return -ENOMEM;
b2a66aad
AJ
1820
1821 bacpy(&entry->bdaddr, bdaddr);
1822
1823 list_add(&entry->list, &hdev->blacklist);
1824
88c1fe4b 1825 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1826}
1827
88c1fe4b 1828int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1829{
1830 struct bdaddr_list *entry;
b2a66aad 1831
1ec918ce 1832 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1833 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1834
1835 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1836 if (!entry)
5e762444 1837 return -ENOENT;
b2a66aad
AJ
1838
1839 list_del(&entry->list);
1840 kfree(entry);
1841
88c1fe4b 1842 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1843}
1844
42c6b129 1845static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1846{
1847 struct le_scan_params *param = (struct le_scan_params *) opt;
1848 struct hci_cp_le_set_scan_param cp;
1849
1850 memset(&cp, 0, sizeof(cp));
1851 cp.type = param->type;
1852 cp.interval = cpu_to_le16(param->interval);
1853 cp.window = cpu_to_le16(param->window);
1854
42c6b129 1855 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1856}
1857
42c6b129 1858static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1859{
1860 struct hci_cp_le_set_scan_enable cp;
1861
1862 memset(&cp, 0, sizeof(cp));
1863 cp.enable = 1;
0431a43c 1864 cp.filter_dup = 1;
7ba8b4be 1865
42c6b129 1866 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1867}
1868
/* Synchronously program the scan parameters and enable LE scanning,
 * then schedule the automatic disable after @timeout ms.
 * Returns 0, -EINPROGRESS when a scan is already active, or the
 * __hci_req_sync() error.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Fixed 3 s budget for each synchronous HCI request below */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scan is on; arm the work that turns it back off */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
1902
7dbfac1d
AG
/* Abort a timed LE scan. Returns -EALREADY when no scan is active.
 * The disable command is only sent if the pending disable work could
 * be cancelled; otherwise that work will (or already did) turn the
 * scan off itself.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1920
7ba8b4be
AG
/* Delayed work that turns LE scanning back off when the scan timeout
 * expires; the zeroed cp means enable = 0 (disable).
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1933
28b75a89
AG
1934static void le_scan_work(struct work_struct *work)
1935{
1936 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1937 struct le_scan_params *param = &hdev->le_scan_params;
1938
1939 BT_DBG("%s", hdev->name);
1940
04124681
GP
1941 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1942 param->timeout);
28b75a89
AG
1943}
1944
/* Kick off an asynchronous LE scan via the le_scan work item.
 * Returns 0, -ENOTSUPP when the device is in LE peripheral role, or
 * -EINPROGRESS when a scan is already being set up.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	/* Parameters are handed to le_scan_work() through the hdev */
	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the scan setup blocks on synchronous requests */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1967
9be0dab7
DH
/* Alloc HCI device: allocate and initialize a struct hci_dev with
 * conservative defaults, ready for the driver to fill in its callbacks
 * and call hci_register_dev(). Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Defaults used until the controller reports its capabilities */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Detects commands the controller never answered */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
2022EXPORT_SYMBOL(hci_alloc_dev);
2023
/* Free HCI device: counterpart of hci_alloc_dev(). The structure
 * itself is released later by the device core's release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
2032EXPORT_SYMBOL(hci_free_dev);
2033
1da177e4
LT
/* Register HCI device: assign an index, hook the device into the
 * global list, sysfs and rfkill, and schedule the initial power-on.
 * Returns the new index (>= 0) or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A transport driver must supply at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Two single-threaded high-priority workqueues: one for rx/tx/
	 * cmd processing, one for serialized request processing */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: failure to register it is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
2121EXPORT_SYMBOL(hci_register_dev);
2122
/* Unregister HCI device: undo hci_register_dev(), shutting the
 * controller down and releasing all per-device state.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark early so concurrent users can bail out */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index: hdev may be gone after the final hci_dev_put */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Recycle the index using the value saved above */
	ida_simple_remove(&hci_index_ida, id);
}
2180EXPORT_SYMBOL(hci_unregister_dev);
2181
/* Suspend HCI device: broadcast the suspend event to HCI notifier
 * consumers. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
2188EXPORT_SYMBOL(hci_suspend_dev);
2189
/* Resume HCI device: broadcast the resume event to HCI notifier
 * consumers. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
2196EXPORT_SYMBOL(hci_resume_dev);
2197
76bca880
MH
2198/* Receive frame from HCI drivers */
2199int hci_recv_frame(struct sk_buff *skb)
2200{
2201 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2202 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2203 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2204 kfree_skb(skb);
2205 return -ENXIO;
2206 }
2207
d82603c6 2208 /* Incoming skb */
76bca880
MH
2209 bt_cb(skb)->incoming = 1;
2210
2211 /* Time stamp */
2212 __net_timestamp(skb);
2213
76bca880 2214 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2215 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2216
76bca880
MH
2217 return 0;
2218}
2219EXPORT_SYMBOL(hci_recv_frame);
2220
33e882a5 2221static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2222 int count, __u8 index)
33e882a5
SS
2223{
2224 int len = 0;
2225 int hlen = 0;
2226 int remain = count;
2227 struct sk_buff *skb;
2228 struct bt_skb_cb *scb;
2229
2230 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2231 index >= NUM_REASSEMBLY)
33e882a5
SS
2232 return -EILSEQ;
2233
2234 skb = hdev->reassembly[index];
2235
2236 if (!skb) {
2237 switch (type) {
2238 case HCI_ACLDATA_PKT:
2239 len = HCI_MAX_FRAME_SIZE;
2240 hlen = HCI_ACL_HDR_SIZE;
2241 break;
2242 case HCI_EVENT_PKT:
2243 len = HCI_MAX_EVENT_SIZE;
2244 hlen = HCI_EVENT_HDR_SIZE;
2245 break;
2246 case HCI_SCODATA_PKT:
2247 len = HCI_MAX_SCO_SIZE;
2248 hlen = HCI_SCO_HDR_SIZE;
2249 break;
2250 }
2251
1e429f38 2252 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2253 if (!skb)
2254 return -ENOMEM;
2255
2256 scb = (void *) skb->cb;
2257 scb->expect = hlen;
2258 scb->pkt_type = type;
2259
2260 skb->dev = (void *) hdev;
2261 hdev->reassembly[index] = skb;
2262 }
2263
2264 while (count) {
2265 scb = (void *) skb->cb;
89bb46d0 2266 len = min_t(uint, scb->expect, count);
33e882a5
SS
2267
2268 memcpy(skb_put(skb, len), data, len);
2269
2270 count -= len;
2271 data += len;
2272 scb->expect -= len;
2273 remain = count;
2274
2275 switch (type) {
2276 case HCI_EVENT_PKT:
2277 if (skb->len == HCI_EVENT_HDR_SIZE) {
2278 struct hci_event_hdr *h = hci_event_hdr(skb);
2279 scb->expect = h->plen;
2280
2281 if (skb_tailroom(skb) < scb->expect) {
2282 kfree_skb(skb);
2283 hdev->reassembly[index] = NULL;
2284 return -ENOMEM;
2285 }
2286 }
2287 break;
2288
2289 case HCI_ACLDATA_PKT:
2290 if (skb->len == HCI_ACL_HDR_SIZE) {
2291 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2292 scb->expect = __le16_to_cpu(h->dlen);
2293
2294 if (skb_tailroom(skb) < scb->expect) {
2295 kfree_skb(skb);
2296 hdev->reassembly[index] = NULL;
2297 return -ENOMEM;
2298 }
2299 }
2300 break;
2301
2302 case HCI_SCODATA_PKT:
2303 if (skb->len == HCI_SCO_HDR_SIZE) {
2304 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2305 scb->expect = h->dlen;
2306
2307 if (skb_tailroom(skb) < scb->expect) {
2308 kfree_skb(skb);
2309 hdev->reassembly[index] = NULL;
2310 return -ENOMEM;
2311 }
2312 }
2313 break;
2314 }
2315
2316 if (scb->expect == 0) {
2317 /* Complete frame */
2318
2319 bt_cb(skb)->pkt_type = type;
2320 hci_recv_frame(skb);
2321
2322 hdev->reassembly[index] = NULL;
2323 return remain;
2324 }
2325 }
2326
2327 return remain;
2328}
2329
ef222013
MH
2330int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2331{
f39a3c06
SS
2332 int rem = 0;
2333
ef222013
MH
2334 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2335 return -EILSEQ;
2336
da5f6c37 2337 while (count) {
1e429f38 2338 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2339 if (rem < 0)
2340 return rem;
ef222013 2341
f39a3c06
SS
2342 data += (count - rem);
2343 count = rem;
f81c6224 2344 }
ef222013 2345
f39a3c06 2346 return rem;
ef222013
MH
2347}
2348EXPORT_SYMBOL(hci_recv_fragment);
2349
99811510
SS
2350#define STREAM_REASSEMBLY 0
2351
2352int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2353{
2354 int type;
2355 int rem = 0;
2356
da5f6c37 2357 while (count) {
99811510
SS
2358 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2359
2360 if (!skb) {
2361 struct { char type; } *pkt;
2362
2363 /* Start of the frame */
2364 pkt = data;
2365 type = pkt->type;
2366
2367 data++;
2368 count--;
2369 } else
2370 type = bt_cb(skb)->pkt_type;
2371
1e429f38 2372 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2373 STREAM_REASSEMBLY);
99811510
SS
2374 if (rem < 0)
2375 return rem;
2376
2377 data += (count - rem);
2378 count = rem;
f81c6224 2379 }
99811510
SS
2380
2381 return rem;
2382}
2383EXPORT_SYMBOL(hci_recv_stream_fragment);
2384
1da177e4
LT
2385/* ---- Interface to upper protocols ---- */
2386
1da177e4
LT
2387int hci_register_cb(struct hci_cb *cb)
2388{
2389 BT_DBG("%p name %s", cb, cb->name);
2390
f20d09d5 2391 write_lock(&hci_cb_list_lock);
1da177e4 2392 list_add(&cb->list, &hci_cb_list);
f20d09d5 2393 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2394
2395 return 0;
2396}
2397EXPORT_SYMBOL(hci_register_cb);
2398
2399int hci_unregister_cb(struct hci_cb *cb)
2400{
2401 BT_DBG("%p name %s", cb, cb->name);
2402
f20d09d5 2403 write_lock(&hci_cb_list_lock);
1da177e4 2404 list_del(&cb->list);
f20d09d5 2405 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2406
2407 return 0;
2408}
2409EXPORT_SYMBOL(hci_unregister_cb);
2410
2411static int hci_send_frame(struct sk_buff *skb)
2412{
2413 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2414
2415 if (!hdev) {
2416 kfree_skb(skb);
2417 return -ENODEV;
2418 }
2419
0d48d939 2420 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2421
cd82e61c
MH
2422 /* Time stamp */
2423 __net_timestamp(skb);
1da177e4 2424
cd82e61c
MH
2425 /* Send copy to monitor */
2426 hci_send_to_monitor(hdev, skb);
2427
2428 if (atomic_read(&hdev->promisc)) {
2429 /* Send copy to the sockets */
470fe1b5 2430 hci_send_to_sock(hdev, skb);
1da177e4
LT
2431 }
2432
2433 /* Get rid of skb owner, prior to sending to the driver. */
2434 skb_orphan(skb);
2435
2436 return hdev->send(skb);
2437}
2438
3119ae95
JH
2439void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2440{
2441 skb_queue_head_init(&req->cmd_q);
2442 req->hdev = hdev;
2443}
2444
2445int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2446{
2447 struct hci_dev *hdev = req->hdev;
2448 struct sk_buff *skb;
2449 unsigned long flags;
2450
2451 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2452
2453 /* Do not allow empty requests */
2454 if (skb_queue_empty(&req->cmd_q))
2455 return -EINVAL;
2456
2457 skb = skb_peek_tail(&req->cmd_q);
2458 bt_cb(skb)->req.complete = complete;
2459
2460 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2461 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2462 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2463
2464 queue_work(hdev->workqueue, &hdev->cmd_work);
2465
2466 return 0;
2467}
2468
1ca3a9d0
JH
2469static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2470 u32 plen, void *param)
1da177e4
LT
2471{
2472 int len = HCI_COMMAND_HDR_SIZE + plen;
2473 struct hci_command_hdr *hdr;
2474 struct sk_buff *skb;
2475
1da177e4 2476 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
2477 if (!skb)
2478 return NULL;
1da177e4
LT
2479
2480 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2481 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2482 hdr->plen = plen;
2483
2484 if (plen)
2485 memcpy(skb_put(skb, plen), param, plen);
2486
2487 BT_DBG("skb len %d", skb->len);
2488
0d48d939 2489 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2490 skb->dev = (void *) hdev;
c78ae283 2491
1ca3a9d0
JH
2492 return skb;
2493}
2494
2495/* Send HCI command */
2496int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2497{
2498 struct sk_buff *skb;
2499
2500 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2501
2502 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2503 if (!skb) {
2504 BT_ERR("%s no memory for command", hdev->name);
2505 return -ENOMEM;
2506 }
2507
11714b3d
JH
2508 /* Stand-alone HCI commands must be flaged as
2509 * single-command requests.
2510 */
2511 bt_cb(skb)->req.start = true;
2512
1da177e4 2513 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2514 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2515
2516 return 0;
2517}
1da177e4 2518
71c76a17
JH
2519/* Queue a command to an asynchronous HCI request */
2520int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2521{
2522 struct hci_dev *hdev = req->hdev;
2523 struct sk_buff *skb;
2524
2525 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2526
2527 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2528 if (!skb) {
2529 BT_ERR("%s no memory for command", hdev->name);
2530 return -ENOMEM;
2531 }
2532
2533 if (skb_queue_empty(&req->cmd_q))
2534 bt_cb(skb)->req.start = true;
2535
2536 skb_queue_tail(&req->cmd_q, skb);
2537
2538 return 0;
2539}
2540
1da177e4 2541/* Get data from the previously sent command */
a9de9248 2542void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2543{
2544 struct hci_command_hdr *hdr;
2545
2546 if (!hdev->sent_cmd)
2547 return NULL;
2548
2549 hdr = (void *) hdev->sent_cmd->data;
2550
a9de9248 2551 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2552 return NULL;
2553
f0e09510 2554 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2555
2556 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2557}
2558
2559/* Send ACL data */
2560static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2561{
2562 struct hci_acl_hdr *hdr;
2563 int len = skb->len;
2564
badff6d0
ACM
2565 skb_push(skb, HCI_ACL_HDR_SIZE);
2566 skb_reset_transport_header(skb);
9c70220b 2567 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2568 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2569 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2570}
2571
ee22be7e 2572static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2573 struct sk_buff *skb, __u16 flags)
1da177e4 2574{
ee22be7e 2575 struct hci_conn *conn = chan->conn;
1da177e4
LT
2576 struct hci_dev *hdev = conn->hdev;
2577 struct sk_buff *list;
2578
087bfd99
GP
2579 skb->len = skb_headlen(skb);
2580 skb->data_len = 0;
2581
2582 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2583
2584 switch (hdev->dev_type) {
2585 case HCI_BREDR:
2586 hci_add_acl_hdr(skb, conn->handle, flags);
2587 break;
2588 case HCI_AMP:
2589 hci_add_acl_hdr(skb, chan->handle, flags);
2590 break;
2591 default:
2592 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2593 return;
2594 }
087bfd99 2595
70f23020
AE
2596 list = skb_shinfo(skb)->frag_list;
2597 if (!list) {
1da177e4
LT
2598 /* Non fragmented */
2599 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2600
73d80deb 2601 skb_queue_tail(queue, skb);
1da177e4
LT
2602 } else {
2603 /* Fragmented */
2604 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2605
2606 skb_shinfo(skb)->frag_list = NULL;
2607
2608 /* Queue all fragments atomically */
af3e6359 2609 spin_lock(&queue->lock);
1da177e4 2610
73d80deb 2611 __skb_queue_tail(queue, skb);
e702112f
AE
2612
2613 flags &= ~ACL_START;
2614 flags |= ACL_CONT;
1da177e4
LT
2615 do {
2616 skb = list; list = list->next;
8e87d142 2617
1da177e4 2618 skb->dev = (void *) hdev;
0d48d939 2619 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2620 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2621
2622 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2623
73d80deb 2624 __skb_queue_tail(queue, skb);
1da177e4
LT
2625 } while (list);
2626
af3e6359 2627 spin_unlock(&queue->lock);
1da177e4 2628 }
73d80deb
LAD
2629}
2630
2631void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2632{
ee22be7e 2633 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 2634
f0e09510 2635 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb
LAD
2636
2637 skb->dev = (void *) hdev;
73d80deb 2638
ee22be7e 2639 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 2640
3eff45ea 2641 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2642}
1da177e4
LT
2643
2644/* Send SCO data */
0d861d8b 2645void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2646{
2647 struct hci_dev *hdev = conn->hdev;
2648 struct hci_sco_hdr hdr;
2649
2650 BT_DBG("%s len %d", hdev->name, skb->len);
2651
aca3192c 2652 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2653 hdr.dlen = skb->len;
2654
badff6d0
ACM
2655 skb_push(skb, HCI_SCO_HDR_SIZE);
2656 skb_reset_transport_header(skb);
9c70220b 2657 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2658
2659 skb->dev = (void *) hdev;
0d48d939 2660 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2661
1da177e4 2662 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2663 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2664}
1da177e4
LT
2665
2666/* ---- HCI TX task (outgoing data) ---- */
2667
2668/* HCI Connection scheduler */
6039aa73
GP
2669static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2670 int *quote)
1da177e4
LT
2671{
2672 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2673 struct hci_conn *conn = NULL, *c;
abc5de8f 2674 unsigned int num = 0, min = ~0;
1da177e4 2675
8e87d142 2676 /* We don't have to lock device here. Connections are always
1da177e4 2677 * added and removed with TX task disabled. */
bf4c6325
GP
2678
2679 rcu_read_lock();
2680
2681 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2682 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2683 continue;
769be974
MH
2684
2685 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2686 continue;
2687
1da177e4
LT
2688 num++;
2689
2690 if (c->sent < min) {
2691 min = c->sent;
2692 conn = c;
2693 }
52087a79
LAD
2694
2695 if (hci_conn_num(hdev, type) == num)
2696 break;
1da177e4
LT
2697 }
2698
bf4c6325
GP
2699 rcu_read_unlock();
2700
1da177e4 2701 if (conn) {
6ed58ec5
VT
2702 int cnt, q;
2703
2704 switch (conn->type) {
2705 case ACL_LINK:
2706 cnt = hdev->acl_cnt;
2707 break;
2708 case SCO_LINK:
2709 case ESCO_LINK:
2710 cnt = hdev->sco_cnt;
2711 break;
2712 case LE_LINK:
2713 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2714 break;
2715 default:
2716 cnt = 0;
2717 BT_ERR("Unknown link type");
2718 }
2719
2720 q = cnt / num;
1da177e4
LT
2721 *quote = q ? q : 1;
2722 } else
2723 *quote = 0;
2724
2725 BT_DBG("conn %p quote %d", conn, *quote);
2726 return conn;
2727}
2728
6039aa73 2729static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2730{
2731 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2732 struct hci_conn *c;
1da177e4 2733
bae1f5d9 2734 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2735
bf4c6325
GP
2736 rcu_read_lock();
2737
1da177e4 2738 /* Kill stalled connections */
bf4c6325 2739 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2740 if (c->type == type && c->sent) {
6ed93dc6
AE
2741 BT_ERR("%s killing stalled connection %pMR",
2742 hdev->name, &c->dst);
bed71748 2743 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2744 }
2745 }
bf4c6325
GP
2746
2747 rcu_read_unlock();
1da177e4
LT
2748}
2749
6039aa73
GP
2750static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2751 int *quote)
1da177e4 2752{
73d80deb
LAD
2753 struct hci_conn_hash *h = &hdev->conn_hash;
2754 struct hci_chan *chan = NULL;
abc5de8f 2755 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2756 struct hci_conn *conn;
73d80deb
LAD
2757 int cnt, q, conn_num = 0;
2758
2759 BT_DBG("%s", hdev->name);
2760
bf4c6325
GP
2761 rcu_read_lock();
2762
2763 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2764 struct hci_chan *tmp;
2765
2766 if (conn->type != type)
2767 continue;
2768
2769 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2770 continue;
2771
2772 conn_num++;
2773
8192edef 2774 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2775 struct sk_buff *skb;
2776
2777 if (skb_queue_empty(&tmp->data_q))
2778 continue;
2779
2780 skb = skb_peek(&tmp->data_q);
2781 if (skb->priority < cur_prio)
2782 continue;
2783
2784 if (skb->priority > cur_prio) {
2785 num = 0;
2786 min = ~0;
2787 cur_prio = skb->priority;
2788 }
2789
2790 num++;
2791
2792 if (conn->sent < min) {
2793 min = conn->sent;
2794 chan = tmp;
2795 }
2796 }
2797
2798 if (hci_conn_num(hdev, type) == conn_num)
2799 break;
2800 }
2801
bf4c6325
GP
2802 rcu_read_unlock();
2803
73d80deb
LAD
2804 if (!chan)
2805 return NULL;
2806
2807 switch (chan->conn->type) {
2808 case ACL_LINK:
2809 cnt = hdev->acl_cnt;
2810 break;
bd1eb66b
AE
2811 case AMP_LINK:
2812 cnt = hdev->block_cnt;
2813 break;
73d80deb
LAD
2814 case SCO_LINK:
2815 case ESCO_LINK:
2816 cnt = hdev->sco_cnt;
2817 break;
2818 case LE_LINK:
2819 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2820 break;
2821 default:
2822 cnt = 0;
2823 BT_ERR("Unknown link type");
2824 }
2825
2826 q = cnt / num;
2827 *quote = q ? q : 1;
2828 BT_DBG("chan %p quote %d", chan, *quote);
2829 return chan;
2830}
2831
02b20f0b
LAD
2832static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2833{
2834 struct hci_conn_hash *h = &hdev->conn_hash;
2835 struct hci_conn *conn;
2836 int num = 0;
2837
2838 BT_DBG("%s", hdev->name);
2839
bf4c6325
GP
2840 rcu_read_lock();
2841
2842 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2843 struct hci_chan *chan;
2844
2845 if (conn->type != type)
2846 continue;
2847
2848 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2849 continue;
2850
2851 num++;
2852
8192edef 2853 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2854 struct sk_buff *skb;
2855
2856 if (chan->sent) {
2857 chan->sent = 0;
2858 continue;
2859 }
2860
2861 if (skb_queue_empty(&chan->data_q))
2862 continue;
2863
2864 skb = skb_peek(&chan->data_q);
2865 if (skb->priority >= HCI_PRIO_MAX - 1)
2866 continue;
2867
2868 skb->priority = HCI_PRIO_MAX - 1;
2869
2870 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 2871 skb->priority);
02b20f0b
LAD
2872 }
2873
2874 if (hci_conn_num(hdev, type) == num)
2875 break;
2876 }
bf4c6325
GP
2877
2878 rcu_read_unlock();
2879
02b20f0b
LAD
2880}
2881
b71d385a
AE
2882static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2883{
2884 /* Calculate count of blocks used by this packet */
2885 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2886}
2887
6039aa73 2888static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2889{
1da177e4
LT
2890 if (!test_bit(HCI_RAW, &hdev->flags)) {
2891 /* ACL tx timeout must be longer than maximum
2892 * link supervision timeout (40.9 seconds) */
63d2bc1b 2893 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2894 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2895 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2896 }
63d2bc1b 2897}
1da177e4 2898
6039aa73 2899static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2900{
2901 unsigned int cnt = hdev->acl_cnt;
2902 struct hci_chan *chan;
2903 struct sk_buff *skb;
2904 int quote;
2905
2906 __check_timeout(hdev, cnt);
04837f64 2907
73d80deb 2908 while (hdev->acl_cnt &&
a8c5fb1a 2909 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2910 u32 priority = (skb_peek(&chan->data_q))->priority;
2911 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2912 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2913 skb->len, skb->priority);
73d80deb 2914
ec1cce24
LAD
2915 /* Stop if priority has changed */
2916 if (skb->priority < priority)
2917 break;
2918
2919 skb = skb_dequeue(&chan->data_q);
2920
73d80deb 2921 hci_conn_enter_active_mode(chan->conn,
04124681 2922 bt_cb(skb)->force_active);
04837f64 2923
1da177e4
LT
2924 hci_send_frame(skb);
2925 hdev->acl_last_tx = jiffies;
2926
2927 hdev->acl_cnt--;
73d80deb
LAD
2928 chan->sent++;
2929 chan->conn->sent++;
1da177e4
LT
2930 }
2931 }
02b20f0b
LAD
2932
2933 if (cnt != hdev->acl_cnt)
2934 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2935}
2936
6039aa73 2937static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2938{
63d2bc1b 2939 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2940 struct hci_chan *chan;
2941 struct sk_buff *skb;
2942 int quote;
bd1eb66b 2943 u8 type;
b71d385a 2944
63d2bc1b 2945 __check_timeout(hdev, cnt);
b71d385a 2946
bd1eb66b
AE
2947 BT_DBG("%s", hdev->name);
2948
2949 if (hdev->dev_type == HCI_AMP)
2950 type = AMP_LINK;
2951 else
2952 type = ACL_LINK;
2953
b71d385a 2954 while (hdev->block_cnt > 0 &&
bd1eb66b 2955 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
2956 u32 priority = (skb_peek(&chan->data_q))->priority;
2957 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2958 int blocks;
2959
2960 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2961 skb->len, skb->priority);
b71d385a
AE
2962
2963 /* Stop if priority has changed */
2964 if (skb->priority < priority)
2965 break;
2966
2967 skb = skb_dequeue(&chan->data_q);
2968
2969 blocks = __get_blocks(hdev, skb);
2970 if (blocks > hdev->block_cnt)
2971 return;
2972
2973 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2974 bt_cb(skb)->force_active);
b71d385a
AE
2975
2976 hci_send_frame(skb);
2977 hdev->acl_last_tx = jiffies;
2978
2979 hdev->block_cnt -= blocks;
2980 quote -= blocks;
2981
2982 chan->sent += blocks;
2983 chan->conn->sent += blocks;
2984 }
2985 }
2986
2987 if (cnt != hdev->block_cnt)
bd1eb66b 2988 hci_prio_recalculate(hdev, type);
b71d385a
AE
2989}
2990
6039aa73 2991static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2992{
2993 BT_DBG("%s", hdev->name);
2994
bd1eb66b
AE
2995 /* No ACL link over BR/EDR controller */
2996 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2997 return;
2998
2999 /* No AMP link over AMP controller */
3000 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3001 return;
3002
3003 switch (hdev->flow_ctl_mode) {
3004 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3005 hci_sched_acl_pkt(hdev);
3006 break;
3007
3008 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3009 hci_sched_acl_blk(hdev);
3010 break;
3011 }
3012}
3013
1da177e4 3014/* Schedule SCO */
6039aa73 3015static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3016{
3017 struct hci_conn *conn;
3018 struct sk_buff *skb;
3019 int quote;
3020
3021 BT_DBG("%s", hdev->name);
3022
52087a79
LAD
3023 if (!hci_conn_num(hdev, SCO_LINK))
3024 return;
3025
1da177e4
LT
3026 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3027 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3028 BT_DBG("skb %p len %d", skb, skb->len);
3029 hci_send_frame(skb);
3030
3031 conn->sent++;
3032 if (conn->sent == ~0)
3033 conn->sent = 0;
3034 }
3035 }
3036}
3037
6039aa73 3038static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3039{
3040 struct hci_conn *conn;
3041 struct sk_buff *skb;
3042 int quote;
3043
3044 BT_DBG("%s", hdev->name);
3045
52087a79
LAD
3046 if (!hci_conn_num(hdev, ESCO_LINK))
3047 return;
3048
8fc9ced3
GP
3049 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3050 &quote))) {
b6a0dc82
MH
3051 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3052 BT_DBG("skb %p len %d", skb, skb->len);
3053 hci_send_frame(skb);
3054
3055 conn->sent++;
3056 if (conn->sent == ~0)
3057 conn->sent = 0;
3058 }
3059 }
3060}
3061
6039aa73 3062static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3063{
73d80deb 3064 struct hci_chan *chan;
6ed58ec5 3065 struct sk_buff *skb;
02b20f0b 3066 int quote, cnt, tmp;
6ed58ec5
VT
3067
3068 BT_DBG("%s", hdev->name);
3069
52087a79
LAD
3070 if (!hci_conn_num(hdev, LE_LINK))
3071 return;
3072
6ed58ec5
VT
3073 if (!test_bit(HCI_RAW, &hdev->flags)) {
3074 /* LE tx timeout must be longer than maximum
3075 * link supervision timeout (40.9 seconds) */
bae1f5d9 3076 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3077 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3078 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3079 }
3080
3081 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3082 tmp = cnt;
73d80deb 3083 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3084 u32 priority = (skb_peek(&chan->data_q))->priority;
3085 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3086 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3087 skb->len, skb->priority);
6ed58ec5 3088
ec1cce24
LAD
3089 /* Stop if priority has changed */
3090 if (skb->priority < priority)
3091 break;
3092
3093 skb = skb_dequeue(&chan->data_q);
3094
6ed58ec5
VT
3095 hci_send_frame(skb);
3096 hdev->le_last_tx = jiffies;
3097
3098 cnt--;
73d80deb
LAD
3099 chan->sent++;
3100 chan->conn->sent++;
6ed58ec5
VT
3101 }
3102 }
73d80deb 3103
6ed58ec5
VT
3104 if (hdev->le_pkts)
3105 hdev->le_cnt = cnt;
3106 else
3107 hdev->acl_cnt = cnt;
02b20f0b
LAD
3108
3109 if (cnt != tmp)
3110 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3111}
3112
3eff45ea 3113static void hci_tx_work(struct work_struct *work)
1da177e4 3114{
3eff45ea 3115 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3116 struct sk_buff *skb;
3117
6ed58ec5 3118 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3119 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
3120
3121 /* Schedule queues and send stuff to HCI driver */
3122
3123 hci_sched_acl(hdev);
3124
3125 hci_sched_sco(hdev);
3126
b6a0dc82
MH
3127 hci_sched_esco(hdev);
3128
6ed58ec5
VT
3129 hci_sched_le(hdev);
3130
1da177e4
LT
3131 /* Send next queued raw (unknown type) packet */
3132 while ((skb = skb_dequeue(&hdev->raw_q)))
3133 hci_send_frame(skb);
1da177e4
LT
3134}
3135
25985edc 3136/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3137
3138/* ACL data packet */
6039aa73 3139static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3140{
3141 struct hci_acl_hdr *hdr = (void *) skb->data;
3142 struct hci_conn *conn;
3143 __u16 handle, flags;
3144
3145 skb_pull(skb, HCI_ACL_HDR_SIZE);
3146
3147 handle = __le16_to_cpu(hdr->handle);
3148 flags = hci_flags(handle);
3149 handle = hci_handle(handle);
3150
f0e09510 3151 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3152 handle, flags);
1da177e4
LT
3153
3154 hdev->stat.acl_rx++;
3155
3156 hci_dev_lock(hdev);
3157 conn = hci_conn_hash_lookup_handle(hdev, handle);
3158 hci_dev_unlock(hdev);
8e87d142 3159
1da177e4 3160 if (conn) {
65983fc7 3161 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3162
1da177e4 3163 /* Send to upper protocol */
686ebf28
UF
3164 l2cap_recv_acldata(conn, skb, flags);
3165 return;
1da177e4 3166 } else {
8e87d142 3167 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3168 hdev->name, handle);
1da177e4
LT
3169 }
3170
3171 kfree_skb(skb);
3172}
3173
3174/* SCO data packet */
6039aa73 3175static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3176{
3177 struct hci_sco_hdr *hdr = (void *) skb->data;
3178 struct hci_conn *conn;
3179 __u16 handle;
3180
3181 skb_pull(skb, HCI_SCO_HDR_SIZE);
3182
3183 handle = __le16_to_cpu(hdr->handle);
3184
f0e09510 3185 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3186
3187 hdev->stat.sco_rx++;
3188
3189 hci_dev_lock(hdev);
3190 conn = hci_conn_hash_lookup_handle(hdev, handle);
3191 hci_dev_unlock(hdev);
3192
3193 if (conn) {
1da177e4 3194 /* Send to upper protocol */
686ebf28
UF
3195 sco_recv_scodata(conn, skb);
3196 return;
1da177e4 3197 } else {
8e87d142 3198 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3199 hdev->name, handle);
1da177e4
LT
3200 }
3201
3202 kfree_skb(skb);
3203}
3204
9238f36a
JH
3205static bool hci_req_is_complete(struct hci_dev *hdev)
3206{
3207 struct sk_buff *skb;
3208
3209 skb = skb_peek(&hdev->cmd_q);
3210 if (!skb)
3211 return true;
3212
3213 return bt_cb(skb)->req.start;
3214}
3215
42c6b129
JH
3216static void hci_resend_last(struct hci_dev *hdev)
3217{
3218 struct hci_command_hdr *sent;
3219 struct sk_buff *skb;
3220 u16 opcode;
3221
3222 if (!hdev->sent_cmd)
3223 return;
3224
3225 sent = (void *) hdev->sent_cmd->data;
3226 opcode = __le16_to_cpu(sent->opcode);
3227 if (opcode == HCI_OP_RESET)
3228 return;
3229
3230 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3231 if (!skb)
3232 return;
3233
3234 skb_queue_head(&hdev->cmd_q, skb);
3235 queue_work(hdev->workqueue, &hdev->cmd_work);
3236}
3237
9238f36a
JH
3238void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3239{
3240 hci_req_complete_t req_complete = NULL;
3241 struct sk_buff *skb;
3242 unsigned long flags;
3243
3244 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3245
42c6b129
JH
3246 /* If the completed command doesn't match the last one that was
3247 * sent we need to do special handling of it.
9238f36a 3248 */
42c6b129
JH
3249 if (!hci_sent_cmd_data(hdev, opcode)) {
3250 /* Some CSR based controllers generate a spontaneous
3251 * reset complete event during init and any pending
3252 * command will never be completed. In such a case we
3253 * need to resend whatever was the last sent
3254 * command.
3255 */
3256 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3257 hci_resend_last(hdev);
3258
9238f36a 3259 return;
42c6b129 3260 }
9238f36a
JH
3261
3262 /* If the command succeeded and there's still more commands in
3263 * this request the request is not yet complete.
3264 */
3265 if (!status && !hci_req_is_complete(hdev))
3266 return;
3267
3268 /* If this was the last command in a request the complete
3269 * callback would be found in hdev->sent_cmd instead of the
3270 * command queue (hdev->cmd_q).
3271 */
3272 if (hdev->sent_cmd) {
3273 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3274 if (req_complete)
3275 goto call_complete;
3276 }
3277
3278 /* Remove all pending commands belonging to this request */
3279 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3280 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3281 if (bt_cb(skb)->req.start) {
3282 __skb_queue_head(&hdev->cmd_q, skb);
3283 break;
3284 }
3285
3286 req_complete = bt_cb(skb)->req.complete;
3287 kfree_skb(skb);
3288 }
3289 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3290
3291call_complete:
3292 if (req_complete)
3293 req_complete(hdev, status);
3294}
3295
3296void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3297{
3298 hci_req_complete_t req_complete = NULL;
3299
3300 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3301
3302 if (status) {
3303 hci_req_cmd_complete(hdev, opcode, status);
3304 return;
3305 }
3306
3307 /* No need to handle success status if there are more commands */
3308 if (!hci_req_is_complete(hdev))
3309 return;
3310
3311 if (hdev->sent_cmd)
3312 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3313
3314 /* If the request doesn't have a complete callback or there
3315 * are other commands/requests in the hdev queue we consider
3316 * this request as completed.
3317 */
3318 if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3319 hci_req_cmd_complete(hdev, opcode, status);
3320}
3321
b78752cc 3322static void hci_rx_work(struct work_struct *work)
1da177e4 3323{
b78752cc 3324 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3325 struct sk_buff *skb;
3326
3327 BT_DBG("%s", hdev->name);
3328
1da177e4 3329 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3330 /* Send copy to monitor */
3331 hci_send_to_monitor(hdev, skb);
3332
1da177e4
LT
3333 if (atomic_read(&hdev->promisc)) {
3334 /* Send copy to the sockets */
470fe1b5 3335 hci_send_to_sock(hdev, skb);
1da177e4
LT
3336 }
3337
3338 if (test_bit(HCI_RAW, &hdev->flags)) {
3339 kfree_skb(skb);
3340 continue;
3341 }
3342
3343 if (test_bit(HCI_INIT, &hdev->flags)) {
3344 /* Don't process data packets in this states. */
0d48d939 3345 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3346 case HCI_ACLDATA_PKT:
3347 case HCI_SCODATA_PKT:
3348 kfree_skb(skb);
3349 continue;
3ff50b79 3350 }
1da177e4
LT
3351 }
3352
3353 /* Process frame */
0d48d939 3354 switch (bt_cb(skb)->pkt_type) {
1da177e4 3355 case HCI_EVENT_PKT:
b78752cc 3356 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3357 hci_event_packet(hdev, skb);
3358 break;
3359
3360 case HCI_ACLDATA_PKT:
3361 BT_DBG("%s ACL data packet", hdev->name);
3362 hci_acldata_packet(hdev, skb);
3363 break;
3364
3365 case HCI_SCODATA_PKT:
3366 BT_DBG("%s SCO data packet", hdev->name);
3367 hci_scodata_packet(hdev, skb);
3368 break;
3369
3370 default:
3371 kfree_skb(skb);
3372 break;
3373 }
3374 }
1da177e4
LT
3375}
3376
c347b765 3377static void hci_cmd_work(struct work_struct *work)
1da177e4 3378{
c347b765 3379 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3380 struct sk_buff *skb;
3381
2104786b
AE
3382 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3383 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3384
1da177e4 3385 /* Send queued commands */
5a08ecce
AE
3386 if (atomic_read(&hdev->cmd_cnt)) {
3387 skb = skb_dequeue(&hdev->cmd_q);
3388 if (!skb)
3389 return;
3390
7585b97a 3391 kfree_skb(hdev->sent_cmd);
1da177e4 3392
70f23020
AE
3393 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3394 if (hdev->sent_cmd) {
1da177e4
LT
3395 atomic_dec(&hdev->cmd_cnt);
3396 hci_send_frame(skb);
7bdb8a5c
SJ
3397 if (test_bit(HCI_RESET, &hdev->flags))
3398 del_timer(&hdev->cmd_timer);
3399 else
3400 mod_timer(&hdev->cmd_timer,
5f246e89 3401 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3402 } else {
3403 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3404 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3405 }
3406 }
3407}
2519a1fc
AG
3408
3409int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3410{
3411 /* General inquiry access code (GIAC) */
3412 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3413 struct hci_cp_inquiry cp;
3414
3415 BT_DBG("%s", hdev->name);
3416
3417 if (test_bit(HCI_INQUIRY, &hdev->flags))
3418 return -EINPROGRESS;
3419
4663262c
JH
3420 inquiry_cache_flush(hdev);
3421
2519a1fc
AG
3422 memset(&cp, 0, sizeof(cp));
3423 memcpy(&cp.lap, lap, sizeof(cp.lap));
3424 cp.length = length;
3425
3426 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3427}
023d5049
AG
3428
3429int hci_cancel_inquiry(struct hci_dev *hdev)
3430{
3431 BT_DBG("%s", hdev->name);
3432
3433 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3434 return -EALREADY;
023d5049
AG
3435
3436 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3437}
31f7956c
AG
3438
3439u8 bdaddr_to_le(u8 bdaddr_type)
3440{
3441 switch (bdaddr_type) {
3442 case BDADDR_LE_PUBLIC:
3443 return ADDR_LE_DEV_PUBLIC;
3444
3445 default:
3446 /* Fallback to LE Random address type */
3447 return ADDR_LE_DEV_RANDOM;
3448 }
3449}