Bluetooth: Fix HCI command send functions to use const specifier
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / hci_core.c
CommitLineData
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device-level event (register/unregister, up/down, ...) to the
 * HCI socket layer so listening sockets can be informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
7b1abbbe 82struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
75e84b7c
JH
83{
84 struct hci_ev_cmd_complete *ev;
85 struct hci_event_hdr *hdr;
86 struct sk_buff *skb;
87
88 hci_dev_lock(hdev);
89
90 skb = hdev->recv_evt;
91 hdev->recv_evt = NULL;
92
93 hci_dev_unlock(hdev);
94
95 if (!skb)
96 return ERR_PTR(-ENODATA);
97
98 if (skb->len < sizeof(*hdr)) {
99 BT_ERR("Too short HCI event");
100 goto failed;
101 }
102
103 hdr = (void *) skb->data;
104 skb_pull(skb, HCI_EVENT_HDR_SIZE);
105
7b1abbbe
JH
106 if (event) {
107 if (hdr->evt != event)
108 goto failed;
109 return skb;
110 }
111
75e84b7c
JH
112 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
113 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
114 goto failed;
115 }
116
117 if (skb->len < sizeof(*ev)) {
118 BT_ERR("Too short cmd_complete event");
119 goto failed;
120 }
121
122 ev = (void *) skb->data;
123 skb_pull(skb, sizeof(*ev));
124
125 if (opcode == __le16_to_cpu(ev->opcode))
126 return skb;
127
128 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
129 __le16_to_cpu(ev->opcode));
130
131failed:
132 kfree_skb(skb);
133 return ERR_PTR(-ENODATA);
134}
135
7b1abbbe 136struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 137 const void *param, u8 event, u32 timeout)
75e84b7c
JH
138{
139 DECLARE_WAITQUEUE(wait, current);
140 struct hci_request req;
141 int err = 0;
142
143 BT_DBG("%s", hdev->name);
144
145 hci_req_init(&req, hdev);
146
7b1abbbe 147 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
148
149 hdev->req_status = HCI_REQ_PEND;
150
151 err = hci_req_run(&req, hci_req_sync_complete);
152 if (err < 0)
153 return ERR_PTR(err);
154
155 add_wait_queue(&hdev->req_wait_q, &wait);
156 set_current_state(TASK_INTERRUPTIBLE);
157
158 schedule_timeout(timeout);
159
160 remove_wait_queue(&hdev->req_wait_q, &wait);
161
162 if (signal_pending(current))
163 return ERR_PTR(-EINTR);
164
165 switch (hdev->req_status) {
166 case HCI_REQ_DONE:
167 err = -bt_to_errno(hdev->req_result);
168 break;
169
170 case HCI_REQ_CANCELED:
171 err = -hdev->req_result;
172 break;
173
174 default:
175 err = -ETIMEDOUT;
176 break;
177 }
178
179 hdev->req_status = hdev->req_result = 0;
180
181 BT_DBG("%s end: err %d", hdev->name, err);
182
183 if (err < 0)
184 return ERR_PTR(err);
185
7b1abbbe
JH
186 return hci_get_cmd_complete(hdev, opcode, event);
187}
188EXPORT_SYMBOL(__hci_cmd_sync_ev);
189
190struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 191 const void *param, u32 timeout)
7b1abbbe
JH
192{
193 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
194}
195EXPORT_SYMBOL(__hci_cmd_sync);
196
1da177e4 197/* Execute request and wait for completion. */
01178cd4 198static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
199 void (*func)(struct hci_request *req,
200 unsigned long opt),
01178cd4 201 unsigned long opt, __u32 timeout)
1da177e4 202{
42c6b129 203 struct hci_request req;
1da177e4
LT
204 DECLARE_WAITQUEUE(wait, current);
205 int err = 0;
206
207 BT_DBG("%s start", hdev->name);
208
42c6b129
JH
209 hci_req_init(&req, hdev);
210
1da177e4
LT
211 hdev->req_status = HCI_REQ_PEND;
212
42c6b129 213 func(&req, opt);
53cce22d 214
42c6b129
JH
215 err = hci_req_run(&req, hci_req_sync_complete);
216 if (err < 0) {
53cce22d 217 hdev->req_status = 0;
920c8300
AG
218
219 /* ENODATA means the HCI request command queue is empty.
220 * This can happen when a request with conditionals doesn't
221 * trigger any commands to be sent. This is normal behavior
222 * and should not trigger an error return.
42c6b129 223 */
920c8300
AG
224 if (err == -ENODATA)
225 return 0;
226
227 return err;
53cce22d
JH
228 }
229
bc4445c7
AG
230 add_wait_queue(&hdev->req_wait_q, &wait);
231 set_current_state(TASK_INTERRUPTIBLE);
232
1da177e4
LT
233 schedule_timeout(timeout);
234
235 remove_wait_queue(&hdev->req_wait_q, &wait);
236
237 if (signal_pending(current))
238 return -EINTR;
239
240 switch (hdev->req_status) {
241 case HCI_REQ_DONE:
e175072f 242 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
243 break;
244
245 case HCI_REQ_CANCELED:
246 err = -hdev->req_result;
247 break;
248
249 default:
250 err = -ETIMEDOUT;
251 break;
3ff50b79 252 }
1da177e4 253
a5040efa 254 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
255
256 BT_DBG("%s end: err %d", hdev->name, err);
257
258 return err;
259}
260
01178cd4 261static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
262 void (*req)(struct hci_request *req,
263 unsigned long opt),
01178cd4 264 unsigned long opt, __u32 timeout)
1da177e4
LT
265{
266 int ret;
267
7c6a329e
MH
268 if (!test_bit(HCI_UP, &hdev->flags))
269 return -ENETDOWN;
270
1da177e4
LT
271 /* Serialize all requests */
272 hci_req_lock(hdev);
01178cd4 273 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
274 hci_req_unlock(hdev);
275
276 return ret;
277}
278
42c6b129 279static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 280{
42c6b129 281 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
282
283 /* Reset device */
42c6b129
JH
284 set_bit(HCI_RESET, &req->hdev->flags);
285 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
286}
287
42c6b129 288static void bredr_init(struct hci_request *req)
1da177e4 289{
42c6b129 290 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 291
1da177e4 292 /* Read Local Supported Features */
42c6b129 293 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 294
1143e5a6 295 /* Read Local Version */
42c6b129 296 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
297
298 /* Read BD Address */
42c6b129 299 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
300}
301
42c6b129 302static void amp_init(struct hci_request *req)
e61ef499 303{
42c6b129 304 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 305
e61ef499 306 /* Read Local Version */
42c6b129 307 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
308
309 /* Read Local AMP Info */
42c6b129 310 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
311
312 /* Read Data Blk size */
42c6b129 313 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
314}
315
42c6b129 316static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 317{
42c6b129 318 struct hci_dev *hdev = req->hdev;
e61ef499
AE
319
320 BT_DBG("%s %ld", hdev->name, opt);
321
11778716
AE
322 /* Reset */
323 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 324 hci_reset_req(req, 0);
11778716 325
e61ef499
AE
326 switch (hdev->dev_type) {
327 case HCI_BREDR:
42c6b129 328 bredr_init(req);
e61ef499
AE
329 break;
330
331 case HCI_AMP:
42c6b129 332 amp_init(req);
e61ef499
AE
333 break;
334
335 default:
336 BT_ERR("Unknown device type %d", hdev->dev_type);
337 break;
338 }
e61ef499
AE
339}
340
42c6b129 341static void bredr_setup(struct hci_request *req)
2177bab5
JH
342{
343 struct hci_cp_delete_stored_link_key cp;
344 __le16 param;
345 __u8 flt_type;
346
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
349
350 /* Read Class of Device */
42c6b129 351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
352
353 /* Read Local Name */
42c6b129 354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
355
356 /* Read Voice Setting */
42c6b129 357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5
JH
358
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
362
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
42c6b129 365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5
JH
366
367 bacpy(&cp.bdaddr, BDADDR_ANY);
368 cp.delete_all = 0x01;
42c6b129 369 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
f332ec66
JH
370
371 /* Read page scan parameters */
372 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
373 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
374 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
375 }
2177bab5
JH
376}
377
42c6b129 378static void le_setup(struct hci_request *req)
2177bab5
JH
379{
380 /* Read LE Buffer Size */
42c6b129 381 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
382
383 /* Read LE Local Supported Features */
42c6b129 384 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
385
386 /* Read LE Advertising Channel TX Power */
42c6b129 387 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
388
389 /* Read LE White List Size */
42c6b129 390 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
391
392 /* Read LE Supported States */
42c6b129 393 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
2177bab5
JH
394}
395
396static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
397{
398 if (lmp_ext_inq_capable(hdev))
399 return 0x02;
400
401 if (lmp_inq_rssi_capable(hdev))
402 return 0x01;
403
404 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
405 hdev->lmp_subver == 0x0757)
406 return 0x01;
407
408 if (hdev->manufacturer == 15) {
409 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
410 return 0x01;
411 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
414 return 0x01;
415 }
416
417 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
418 hdev->lmp_subver == 0x1805)
419 return 0x01;
420
421 return 0x00;
422}
423
42c6b129 424static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
425{
426 u8 mode;
427
42c6b129 428 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 429
42c6b129 430 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
431}
432
42c6b129 433static void hci_setup_event_mask(struct hci_request *req)
2177bab5 434{
42c6b129
JH
435 struct hci_dev *hdev = req->hdev;
436
2177bab5
JH
437 /* The second byte is 0xff instead of 0x9f (two reserved bits
438 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
439 * command otherwise.
440 */
441 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
442
443 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
444 * any event mask for pre 1.2 devices.
445 */
446 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
447 return;
448
449 if (lmp_bredr_capable(hdev)) {
450 events[4] |= 0x01; /* Flow Specification Complete */
451 events[4] |= 0x02; /* Inquiry Result with RSSI */
452 events[4] |= 0x04; /* Read Remote Extended Features Complete */
453 events[5] |= 0x08; /* Synchronous Connection Complete */
454 events[5] |= 0x10; /* Synchronous Connection Changed */
455 }
456
457 if (lmp_inq_rssi_capable(hdev))
458 events[4] |= 0x02; /* Inquiry Result with RSSI */
459
460 if (lmp_sniffsubr_capable(hdev))
461 events[5] |= 0x20; /* Sniff Subrating */
462
463 if (lmp_pause_enc_capable(hdev))
464 events[5] |= 0x80; /* Encryption Key Refresh Complete */
465
466 if (lmp_ext_inq_capable(hdev))
467 events[5] |= 0x40; /* Extended Inquiry Result */
468
469 if (lmp_no_flush_capable(hdev))
470 events[7] |= 0x01; /* Enhanced Flush Complete */
471
472 if (lmp_lsto_capable(hdev))
473 events[6] |= 0x80; /* Link Supervision Timeout Changed */
474
475 if (lmp_ssp_capable(hdev)) {
476 events[6] |= 0x01; /* IO Capability Request */
477 events[6] |= 0x02; /* IO Capability Response */
478 events[6] |= 0x04; /* User Confirmation Request */
479 events[6] |= 0x08; /* User Passkey Request */
480 events[6] |= 0x10; /* Remote OOB Data Request */
481 events[6] |= 0x20; /* Simple Pairing Complete */
482 events[7] |= 0x04; /* User Passkey Notification */
483 events[7] |= 0x08; /* Keypress Notification */
484 events[7] |= 0x10; /* Remote Host Supported
485 * Features Notification
486 */
487 }
488
489 if (lmp_le_capable(hdev))
490 events[7] |= 0x20; /* LE Meta-Event */
491
42c6b129 492 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
493
494 if (lmp_le_capable(hdev)) {
495 memset(events, 0, sizeof(events));
496 events[0] = 0x1f;
42c6b129
JH
497 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
498 sizeof(events), events);
2177bab5
JH
499 }
500}
501
42c6b129 502static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 503{
42c6b129
JH
504 struct hci_dev *hdev = req->hdev;
505
2177bab5 506 if (lmp_bredr_capable(hdev))
42c6b129 507 bredr_setup(req);
2177bab5
JH
508
509 if (lmp_le_capable(hdev))
42c6b129 510 le_setup(req);
2177bab5 511
42c6b129 512 hci_setup_event_mask(req);
2177bab5
JH
513
514 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 515 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
516
517 if (lmp_ssp_capable(hdev)) {
518 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
519 u8 mode = 0x01;
42c6b129
JH
520 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
521 sizeof(mode), &mode);
2177bab5
JH
522 } else {
523 struct hci_cp_write_eir cp;
524
525 memset(hdev->eir, 0, sizeof(hdev->eir));
526 memset(&cp, 0, sizeof(cp));
527
42c6b129 528 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
529 }
530 }
531
532 if (lmp_inq_rssi_capable(hdev))
42c6b129 533 hci_setup_inquiry_mode(req);
2177bab5
JH
534
535 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 536 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
537
538 if (lmp_ext_feat_capable(hdev)) {
539 struct hci_cp_read_local_ext_features cp;
540
541 cp.page = 0x01;
42c6b129
JH
542 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
543 sizeof(cp), &cp);
2177bab5
JH
544 }
545
546 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
547 u8 enable = 1;
42c6b129
JH
548 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
549 &enable);
2177bab5
JH
550 }
551}
552
42c6b129 553static void hci_setup_link_policy(struct hci_request *req)
2177bab5 554{
42c6b129 555 struct hci_dev *hdev = req->hdev;
2177bab5
JH
556 struct hci_cp_write_def_link_policy cp;
557 u16 link_policy = 0;
558
559 if (lmp_rswitch_capable(hdev))
560 link_policy |= HCI_LP_RSWITCH;
561 if (lmp_hold_capable(hdev))
562 link_policy |= HCI_LP_HOLD;
563 if (lmp_sniff_capable(hdev))
564 link_policy |= HCI_LP_SNIFF;
565 if (lmp_park_capable(hdev))
566 link_policy |= HCI_LP_PARK;
567
568 cp.policy = cpu_to_le16(link_policy);
42c6b129 569 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
570}
571
42c6b129 572static void hci_set_le_support(struct hci_request *req)
2177bab5 573{
42c6b129 574 struct hci_dev *hdev = req->hdev;
2177bab5
JH
575 struct hci_cp_write_le_host_supported cp;
576
577 memset(&cp, 0, sizeof(cp));
578
579 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
580 cp.le = 0x01;
581 cp.simul = lmp_le_br_capable(hdev);
582 }
583
584 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
585 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
586 &cp);
2177bab5
JH
587}
588
42c6b129 589static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 590{
42c6b129 591 struct hci_dev *hdev = req->hdev;
d2c5d77f 592 u8 p;
42c6b129 593
2177bab5 594 if (hdev->commands[5] & 0x10)
42c6b129 595 hci_setup_link_policy(req);
2177bab5 596
04b4edcb 597 if (lmp_le_capable(hdev)) {
42c6b129 598 hci_set_le_support(req);
04b4edcb
JH
599 hci_update_ad(req);
600 }
d2c5d77f
JH
601
602 /* Read features beyond page 1 if available */
603 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
604 struct hci_cp_read_local_ext_features cp;
605
606 cp.page = p;
607 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
608 sizeof(cp), &cp);
609 }
2177bab5
JH
610}
611
612static int __hci_init(struct hci_dev *hdev)
613{
614 int err;
615
616 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
617 if (err < 0)
618 return err;
619
620 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
621 * BR/EDR/LE type controllers. AMP controllers only need the
622 * first stage init.
623 */
624 if (hdev->dev_type != HCI_BREDR)
625 return 0;
626
627 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
628 if (err < 0)
629 return err;
630
631 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
632}
633
42c6b129 634static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
635{
636 __u8 scan = opt;
637
42c6b129 638 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
639
640 /* Inquiry and Page scans */
42c6b129 641 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
642}
643
42c6b129 644static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
645{
646 __u8 auth = opt;
647
42c6b129 648 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
649
650 /* Authentication */
42c6b129 651 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
652}
653
42c6b129 654static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
655{
656 __u8 encrypt = opt;
657
42c6b129 658 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 659
e4e8e37c 660 /* Encryption */
42c6b129 661 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
662}
663
42c6b129 664static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
665{
666 __le16 policy = cpu_to_le16(opt);
667
42c6b129 668 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
669
670 /* Default link policy */
42c6b129 671 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
672}
673
8e87d142 674/* Get HCI device by index.
1da177e4
LT
675 * Device is held on return. */
676struct hci_dev *hci_dev_get(int index)
677{
8035ded4 678 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
679
680 BT_DBG("%d", index);
681
682 if (index < 0)
683 return NULL;
684
685 read_lock(&hci_dev_list_lock);
8035ded4 686 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
687 if (d->id == index) {
688 hdev = hci_dev_hold(d);
689 break;
690 }
691 }
692 read_unlock(&hci_dev_list_lock);
693 return hdev;
694}
1da177e4
LT
695
696/* ---- Inquiry support ---- */
ff9ef578 697
30dc78e1
JH
698bool hci_discovery_active(struct hci_dev *hdev)
699{
700 struct discovery_state *discov = &hdev->discovery;
701
6fbe195d 702 switch (discov->state) {
343f935b 703 case DISCOVERY_FINDING:
6fbe195d 704 case DISCOVERY_RESOLVING:
30dc78e1
JH
705 return true;
706
6fbe195d
AG
707 default:
708 return false;
709 }
30dc78e1
JH
710}
711
ff9ef578
JH
712void hci_discovery_set_state(struct hci_dev *hdev, int state)
713{
714 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
715
716 if (hdev->discovery.state == state)
717 return;
718
719 switch (state) {
720 case DISCOVERY_STOPPED:
7b99b659
AG
721 if (hdev->discovery.state != DISCOVERY_STARTING)
722 mgmt_discovering(hdev, 0);
ff9ef578
JH
723 break;
724 case DISCOVERY_STARTING:
725 break;
343f935b 726 case DISCOVERY_FINDING:
ff9ef578
JH
727 mgmt_discovering(hdev, 1);
728 break;
30dc78e1
JH
729 case DISCOVERY_RESOLVING:
730 break;
ff9ef578
JH
731 case DISCOVERY_STOPPING:
732 break;
733 }
734
735 hdev->discovery.state = state;
736}
737
1da177e4
LT
738static void inquiry_cache_flush(struct hci_dev *hdev)
739{
30883512 740 struct discovery_state *cache = &hdev->discovery;
b57c1a56 741 struct inquiry_entry *p, *n;
1da177e4 742
561aafbc
JH
743 list_for_each_entry_safe(p, n, &cache->all, all) {
744 list_del(&p->all);
b57c1a56 745 kfree(p);
1da177e4 746 }
561aafbc
JH
747
748 INIT_LIST_HEAD(&cache->unknown);
749 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
750}
751
a8c5fb1a
GP
752struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
753 bdaddr_t *bdaddr)
1da177e4 754{
30883512 755 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
756 struct inquiry_entry *e;
757
6ed93dc6 758 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 759
561aafbc
JH
760 list_for_each_entry(e, &cache->all, all) {
761 if (!bacmp(&e->data.bdaddr, bdaddr))
762 return e;
763 }
764
765 return NULL;
766}
767
768struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 769 bdaddr_t *bdaddr)
561aafbc 770{
30883512 771 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
772 struct inquiry_entry *e;
773
6ed93dc6 774 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
775
776 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 777 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
778 return e;
779 }
780
781 return NULL;
1da177e4
LT
782}
783
30dc78e1 784struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
785 bdaddr_t *bdaddr,
786 int state)
30dc78e1
JH
787{
788 struct discovery_state *cache = &hdev->discovery;
789 struct inquiry_entry *e;
790
6ed93dc6 791 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
792
793 list_for_each_entry(e, &cache->resolve, list) {
794 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
795 return e;
796 if (!bacmp(&e->data.bdaddr, bdaddr))
797 return e;
798 }
799
800 return NULL;
801}
802
a3d4e20a 803void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 804 struct inquiry_entry *ie)
a3d4e20a
JH
805{
806 struct discovery_state *cache = &hdev->discovery;
807 struct list_head *pos = &cache->resolve;
808 struct inquiry_entry *p;
809
810 list_del(&ie->list);
811
812 list_for_each_entry(p, &cache->resolve, list) {
813 if (p->name_state != NAME_PENDING &&
a8c5fb1a 814 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
815 break;
816 pos = &p->list;
817 }
818
819 list_add(&ie->list, pos);
820}
821
3175405b 822bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 823 bool name_known, bool *ssp)
1da177e4 824{
30883512 825 struct discovery_state *cache = &hdev->discovery;
70f23020 826 struct inquiry_entry *ie;
1da177e4 827
6ed93dc6 828 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 829
2b2fec4d
SJ
830 hci_remove_remote_oob_data(hdev, &data->bdaddr);
831
388fc8fa
JH
832 if (ssp)
833 *ssp = data->ssp_mode;
834
70f23020 835 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 836 if (ie) {
388fc8fa
JH
837 if (ie->data.ssp_mode && ssp)
838 *ssp = true;
839
a3d4e20a 840 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 841 data->rssi != ie->data.rssi) {
a3d4e20a
JH
842 ie->data.rssi = data->rssi;
843 hci_inquiry_cache_update_resolve(hdev, ie);
844 }
845
561aafbc 846 goto update;
a3d4e20a 847 }
561aafbc
JH
848
849 /* Entry not in the cache. Add new one. */
850 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
851 if (!ie)
3175405b 852 return false;
561aafbc
JH
853
854 list_add(&ie->all, &cache->all);
855
856 if (name_known) {
857 ie->name_state = NAME_KNOWN;
858 } else {
859 ie->name_state = NAME_NOT_KNOWN;
860 list_add(&ie->list, &cache->unknown);
861 }
70f23020 862
561aafbc
JH
863update:
864 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 865 ie->name_state != NAME_PENDING) {
561aafbc
JH
866 ie->name_state = NAME_KNOWN;
867 list_del(&ie->list);
1da177e4
LT
868 }
869
70f23020
AE
870 memcpy(&ie->data, data, sizeof(*data));
871 ie->timestamp = jiffies;
1da177e4 872 cache->timestamp = jiffies;
3175405b
JH
873
874 if (ie->name_state == NAME_NOT_KNOWN)
875 return false;
876
877 return true;
1da177e4
LT
878}
879
880static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
881{
30883512 882 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
883 struct inquiry_info *info = (struct inquiry_info *) buf;
884 struct inquiry_entry *e;
885 int copied = 0;
886
561aafbc 887 list_for_each_entry(e, &cache->all, all) {
1da177e4 888 struct inquiry_data *data = &e->data;
b57c1a56
JH
889
890 if (copied >= num)
891 break;
892
1da177e4
LT
893 bacpy(&info->bdaddr, &data->bdaddr);
894 info->pscan_rep_mode = data->pscan_rep_mode;
895 info->pscan_period_mode = data->pscan_period_mode;
896 info->pscan_mode = data->pscan_mode;
897 memcpy(info->dev_class, data->dev_class, 3);
898 info->clock_offset = data->clock_offset;
b57c1a56 899
1da177e4 900 info++;
b57c1a56 901 copied++;
1da177e4
LT
902 }
903
904 BT_DBG("cache %p, copied %d", cache, copied);
905 return copied;
906}
907
42c6b129 908static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
909{
910 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 911 struct hci_dev *hdev = req->hdev;
1da177e4
LT
912 struct hci_cp_inquiry cp;
913
914 BT_DBG("%s", hdev->name);
915
916 if (test_bit(HCI_INQUIRY, &hdev->flags))
917 return;
918
919 /* Start Inquiry */
920 memcpy(&cp.lap, &ir->lap, 3);
921 cp.length = ir->length;
922 cp.num_rsp = ir->num_rsp;
42c6b129 923 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
924}
925
3e13fa1e
AG
926static int wait_inquiry(void *word)
927{
928 schedule();
929 return signal_pending(current);
930}
931
1da177e4
LT
932int hci_inquiry(void __user *arg)
933{
934 __u8 __user *ptr = arg;
935 struct hci_inquiry_req ir;
936 struct hci_dev *hdev;
937 int err = 0, do_inquiry = 0, max_rsp;
938 long timeo;
939 __u8 *buf;
940
941 if (copy_from_user(&ir, ptr, sizeof(ir)))
942 return -EFAULT;
943
5a08ecce
AE
944 hdev = hci_dev_get(ir.dev_id);
945 if (!hdev)
1da177e4
LT
946 return -ENODEV;
947
09fd0de5 948 hci_dev_lock(hdev);
8e87d142 949 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 950 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
951 inquiry_cache_flush(hdev);
952 do_inquiry = 1;
953 }
09fd0de5 954 hci_dev_unlock(hdev);
1da177e4 955
04837f64 956 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
957
958 if (do_inquiry) {
01178cd4
JH
959 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
960 timeo);
70f23020
AE
961 if (err < 0)
962 goto done;
3e13fa1e
AG
963
964 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
965 * cleared). If it is interrupted by a signal, return -EINTR.
966 */
967 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
968 TASK_INTERRUPTIBLE))
969 return -EINTR;
70f23020 970 }
1da177e4 971
8fc9ced3
GP
972 /* for unlimited number of responses we will use buffer with
973 * 255 entries
974 */
1da177e4
LT
975 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
976
977 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
978 * copy it to the user space.
979 */
01df8c31 980 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 981 if (!buf) {
1da177e4
LT
982 err = -ENOMEM;
983 goto done;
984 }
985
09fd0de5 986 hci_dev_lock(hdev);
1da177e4 987 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 988 hci_dev_unlock(hdev);
1da177e4
LT
989
990 BT_DBG("num_rsp %d", ir.num_rsp);
991
992 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
993 ptr += sizeof(ir);
994 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 995 ir.num_rsp))
1da177e4 996 err = -EFAULT;
8e87d142 997 } else
1da177e4
LT
998 err = -EFAULT;
999
1000 kfree(buf);
1001
1002done:
1003 hci_dev_put(hdev);
1004 return err;
1005}
1006
3f0f524b
JH
1007static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1008{
1009 u8 ad_len = 0, flags = 0;
1010 size_t name_len;
1011
1012 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1013 flags |= LE_AD_GENERAL;
1014
1015 if (!lmp_bredr_capable(hdev))
1016 flags |= LE_AD_NO_BREDR;
1017
1018 if (lmp_le_br_capable(hdev))
1019 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1020
1021 if (lmp_host_le_br_capable(hdev))
1022 flags |= LE_AD_SIM_LE_BREDR_HOST;
1023
1024 if (flags) {
1025 BT_DBG("adv flags 0x%02x", flags);
1026
1027 ptr[0] = 2;
1028 ptr[1] = EIR_FLAGS;
1029 ptr[2] = flags;
1030
1031 ad_len += 3;
1032 ptr += 3;
1033 }
1034
1035 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1036 ptr[0] = 2;
1037 ptr[1] = EIR_TX_POWER;
1038 ptr[2] = (u8) hdev->adv_tx_power;
1039
1040 ad_len += 3;
1041 ptr += 3;
1042 }
1043
1044 name_len = strlen(hdev->dev_name);
1045 if (name_len > 0) {
1046 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1047
1048 if (name_len > max_len) {
1049 name_len = max_len;
1050 ptr[1] = EIR_NAME_SHORT;
1051 } else
1052 ptr[1] = EIR_NAME_COMPLETE;
1053
1054 ptr[0] = name_len + 1;
1055
1056 memcpy(ptr + 2, hdev->dev_name, name_len);
1057
1058 ad_len += (name_len + 2);
1059 ptr += (name_len + 2);
1060 }
1061
1062 return ad_len;
1063}
1064
04b4edcb 1065void hci_update_ad(struct hci_request *req)
3f0f524b 1066{
04b4edcb 1067 struct hci_dev *hdev = req->hdev;
3f0f524b
JH
1068 struct hci_cp_le_set_adv_data cp;
1069 u8 len;
3f0f524b 1070
04b4edcb
JH
1071 if (!lmp_le_capable(hdev))
1072 return;
3f0f524b
JH
1073
1074 memset(&cp, 0, sizeof(cp));
1075
1076 len = create_ad(hdev, cp.data);
1077
1078 if (hdev->adv_data_len == len &&
04b4edcb
JH
1079 memcmp(cp.data, hdev->adv_data, len) == 0)
1080 return;
3f0f524b
JH
1081
1082 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1083 hdev->adv_data_len = len;
1084
1085 cp.length = len;
3f0f524b 1086
04b4edcb 1087 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
3f0f524b
JH
1088}
1089
1da177e4
LT
1090/* ---- HCI ioctl helpers ---- */
1091
1092int hci_dev_open(__u16 dev)
1093{
1094 struct hci_dev *hdev;
1095 int ret = 0;
1096
5a08ecce
AE
1097 hdev = hci_dev_get(dev);
1098 if (!hdev)
1da177e4
LT
1099 return -ENODEV;
1100
1101 BT_DBG("%s %p", hdev->name, hdev);
1102
1103 hci_req_lock(hdev);
1104
94324962
JH
1105 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1106 ret = -ENODEV;
1107 goto done;
1108 }
1109
611b30f7
MH
1110 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1111 ret = -ERFKILL;
1112 goto done;
1113 }
1114
1da177e4
LT
1115 if (test_bit(HCI_UP, &hdev->flags)) {
1116 ret = -EALREADY;
1117 goto done;
1118 }
1119
1da177e4
LT
1120 if (hdev->open(hdev)) {
1121 ret = -EIO;
1122 goto done;
1123 }
1124
f41c70c4
MH
1125 atomic_set(&hdev->cmd_cnt, 1);
1126 set_bit(HCI_INIT, &hdev->flags);
1127
1128 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1129 ret = hdev->setup(hdev);
1130
1131 if (!ret) {
1132 /* Treat all non BR/EDR controllers as raw devices if
1133 * enable_hs is not set.
1134 */
1135 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1136 set_bit(HCI_RAW, &hdev->flags);
1137
1138 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1139 set_bit(HCI_RAW, &hdev->flags);
1140
1141 if (!test_bit(HCI_RAW, &hdev->flags))
1142 ret = __hci_init(hdev);
1da177e4
LT
1143 }
1144
f41c70c4
MH
1145 clear_bit(HCI_INIT, &hdev->flags);
1146
1da177e4
LT
1147 if (!ret) {
1148 hci_dev_hold(hdev);
1149 set_bit(HCI_UP, &hdev->flags);
1150 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a
AE
1151 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1152 mgmt_valid_hdev(hdev)) {
09fd0de5 1153 hci_dev_lock(hdev);
744cf19e 1154 mgmt_powered(hdev, 1);
09fd0de5 1155 hci_dev_unlock(hdev);
56e5cb86 1156 }
8e87d142 1157 } else {
1da177e4 1158 /* Init failed, cleanup */
3eff45ea 1159 flush_work(&hdev->tx_work);
c347b765 1160 flush_work(&hdev->cmd_work);
b78752cc 1161 flush_work(&hdev->rx_work);
1da177e4
LT
1162
1163 skb_queue_purge(&hdev->cmd_q);
1164 skb_queue_purge(&hdev->rx_q);
1165
1166 if (hdev->flush)
1167 hdev->flush(hdev);
1168
1169 if (hdev->sent_cmd) {
1170 kfree_skb(hdev->sent_cmd);
1171 hdev->sent_cmd = NULL;
1172 }
1173
1174 hdev->close(hdev);
1175 hdev->flags = 0;
1176 }
1177
1178done:
1179 hci_req_unlock(hdev);
1180 hci_dev_put(hdev);
1181 return ret;
1182}
1183
1184static int hci_dev_do_close(struct hci_dev *hdev)
1185{
1186 BT_DBG("%s %p", hdev->name, hdev);
1187
28b75a89
AG
1188 cancel_work_sync(&hdev->le_scan);
1189
78c04c0b
VCG
1190 cancel_delayed_work(&hdev->power_off);
1191
1da177e4
LT
1192 hci_req_cancel(hdev, ENODEV);
1193 hci_req_lock(hdev);
1194
1195 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 1196 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
1197 hci_req_unlock(hdev);
1198 return 0;
1199 }
1200
3eff45ea
GP
1201 /* Flush RX and TX works */
1202 flush_work(&hdev->tx_work);
b78752cc 1203 flush_work(&hdev->rx_work);
1da177e4 1204
16ab91ab 1205 if (hdev->discov_timeout > 0) {
e0f9309f 1206 cancel_delayed_work(&hdev->discov_off);
16ab91ab 1207 hdev->discov_timeout = 0;
5e5282bb 1208 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
1209 }
1210
a8b2d5c2 1211 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
1212 cancel_delayed_work(&hdev->service_cache);
1213
7ba8b4be
AG
1214 cancel_delayed_work_sync(&hdev->le_scan_disable);
1215
09fd0de5 1216 hci_dev_lock(hdev);
1da177e4
LT
1217 inquiry_cache_flush(hdev);
1218 hci_conn_hash_flush(hdev);
09fd0de5 1219 hci_dev_unlock(hdev);
1da177e4
LT
1220
1221 hci_notify(hdev, HCI_DEV_DOWN);
1222
1223 if (hdev->flush)
1224 hdev->flush(hdev);
1225
1226 /* Reset device */
1227 skb_queue_purge(&hdev->cmd_q);
1228 atomic_set(&hdev->cmd_cnt, 1);
8af59467 1229 if (!test_bit(HCI_RAW, &hdev->flags) &&
a6c511c6 1230 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 1231 set_bit(HCI_INIT, &hdev->flags);
01178cd4 1232 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
1233 clear_bit(HCI_INIT, &hdev->flags);
1234 }
1235
c347b765
GP
1236 /* flush cmd work */
1237 flush_work(&hdev->cmd_work);
1da177e4
LT
1238
1239 /* Drop queues */
1240 skb_queue_purge(&hdev->rx_q);
1241 skb_queue_purge(&hdev->cmd_q);
1242 skb_queue_purge(&hdev->raw_q);
1243
1244 /* Drop last sent command */
1245 if (hdev->sent_cmd) {
b79f44c1 1246 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
1247 kfree_skb(hdev->sent_cmd);
1248 hdev->sent_cmd = NULL;
1249 }
1250
b6ddb638
JH
1251 kfree_skb(hdev->recv_evt);
1252 hdev->recv_evt = NULL;
1253
1da177e4
LT
1254 /* After this point our queues are empty
1255 * and no tasks are scheduled. */
1256 hdev->close(hdev);
1257
35b973c9
JH
1258 /* Clear flags */
1259 hdev->flags = 0;
1260 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1261
bb4b2a9a
AE
1262 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1263 mgmt_valid_hdev(hdev)) {
8ee56540
MH
1264 hci_dev_lock(hdev);
1265 mgmt_powered(hdev, 0);
1266 hci_dev_unlock(hdev);
1267 }
5add6af8 1268
ced5c338
AE
1269 /* Controller radio is available but is currently powered down */
1270 hdev->amp_status = 0;
1271
e59fda8d 1272 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 1273 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 1274
1da177e4
LT
1275 hci_req_unlock(hdev);
1276
1277 hci_dev_put(hdev);
1278 return 0;
1279}
1280
1281int hci_dev_close(__u16 dev)
1282{
1283 struct hci_dev *hdev;
1284 int err;
1285
70f23020
AE
1286 hdev = hci_dev_get(dev);
1287 if (!hdev)
1da177e4 1288 return -ENODEV;
8ee56540
MH
1289
1290 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1291 cancel_delayed_work(&hdev->power_off);
1292
1da177e4 1293 err = hci_dev_do_close(hdev);
8ee56540 1294
1da177e4
LT
1295 hci_dev_put(hdev);
1296 return err;
1297}
1298
1299int hci_dev_reset(__u16 dev)
1300{
1301 struct hci_dev *hdev;
1302 int ret = 0;
1303
70f23020
AE
1304 hdev = hci_dev_get(dev);
1305 if (!hdev)
1da177e4
LT
1306 return -ENODEV;
1307
1308 hci_req_lock(hdev);
1da177e4
LT
1309
1310 if (!test_bit(HCI_UP, &hdev->flags))
1311 goto done;
1312
1313 /* Drop queues */
1314 skb_queue_purge(&hdev->rx_q);
1315 skb_queue_purge(&hdev->cmd_q);
1316
09fd0de5 1317 hci_dev_lock(hdev);
1da177e4
LT
1318 inquiry_cache_flush(hdev);
1319 hci_conn_hash_flush(hdev);
09fd0de5 1320 hci_dev_unlock(hdev);
1da177e4
LT
1321
1322 if (hdev->flush)
1323 hdev->flush(hdev);
1324
8e87d142 1325 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1326 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
1327
1328 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 1329 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
1330
1331done:
1da177e4
LT
1332 hci_req_unlock(hdev);
1333 hci_dev_put(hdev);
1334 return ret;
1335}
1336
1337int hci_dev_reset_stat(__u16 dev)
1338{
1339 struct hci_dev *hdev;
1340 int ret = 0;
1341
70f23020
AE
1342 hdev = hci_dev_get(dev);
1343 if (!hdev)
1da177e4
LT
1344 return -ENODEV;
1345
1346 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1347
1348 hci_dev_put(hdev);
1349
1350 return ret;
1351}
1352
1353int hci_dev_cmd(unsigned int cmd, void __user *arg)
1354{
1355 struct hci_dev *hdev;
1356 struct hci_dev_req dr;
1357 int err = 0;
1358
1359 if (copy_from_user(&dr, arg, sizeof(dr)))
1360 return -EFAULT;
1361
70f23020
AE
1362 hdev = hci_dev_get(dr.dev_id);
1363 if (!hdev)
1da177e4
LT
1364 return -ENODEV;
1365
1366 switch (cmd) {
1367 case HCISETAUTH:
01178cd4
JH
1368 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1369 HCI_INIT_TIMEOUT);
1da177e4
LT
1370 break;
1371
1372 case HCISETENCRYPT:
1373 if (!lmp_encrypt_capable(hdev)) {
1374 err = -EOPNOTSUPP;
1375 break;
1376 }
1377
1378 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1379 /* Auth must be enabled first */
01178cd4
JH
1380 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1381 HCI_INIT_TIMEOUT);
1da177e4
LT
1382 if (err)
1383 break;
1384 }
1385
01178cd4
JH
1386 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1387 HCI_INIT_TIMEOUT);
1da177e4
LT
1388 break;
1389
1390 case HCISETSCAN:
01178cd4
JH
1391 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1392 HCI_INIT_TIMEOUT);
1da177e4
LT
1393 break;
1394
1da177e4 1395 case HCISETLINKPOL:
01178cd4
JH
1396 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1397 HCI_INIT_TIMEOUT);
1da177e4
LT
1398 break;
1399
1400 case HCISETLINKMODE:
e4e8e37c
MH
1401 hdev->link_mode = ((__u16) dr.dev_opt) &
1402 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1403 break;
1404
1405 case HCISETPTYPE:
1406 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1407 break;
1408
1409 case HCISETACLMTU:
e4e8e37c
MH
1410 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1411 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1412 break;
1413
1414 case HCISETSCOMTU:
e4e8e37c
MH
1415 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1416 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1417 break;
1418
1419 default:
1420 err = -EINVAL;
1421 break;
1422 }
e4e8e37c 1423
1da177e4
LT
1424 hci_dev_put(hdev);
1425 return err;
1426}
1427
1428int hci_get_dev_list(void __user *arg)
1429{
8035ded4 1430 struct hci_dev *hdev;
1da177e4
LT
1431 struct hci_dev_list_req *dl;
1432 struct hci_dev_req *dr;
1da177e4
LT
1433 int n = 0, size, err;
1434 __u16 dev_num;
1435
1436 if (get_user(dev_num, (__u16 __user *) arg))
1437 return -EFAULT;
1438
1439 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1440 return -EINVAL;
1441
1442 size = sizeof(*dl) + dev_num * sizeof(*dr);
1443
70f23020
AE
1444 dl = kzalloc(size, GFP_KERNEL);
1445 if (!dl)
1da177e4
LT
1446 return -ENOMEM;
1447
1448 dr = dl->dev_req;
1449
f20d09d5 1450 read_lock(&hci_dev_list_lock);
8035ded4 1451 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1452 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1453 cancel_delayed_work(&hdev->power_off);
c542a06c 1454
a8b2d5c2
JH
1455 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1456 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1457
1da177e4
LT
1458 (dr + n)->dev_id = hdev->id;
1459 (dr + n)->dev_opt = hdev->flags;
c542a06c 1460
1da177e4
LT
1461 if (++n >= dev_num)
1462 break;
1463 }
f20d09d5 1464 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1465
1466 dl->dev_num = n;
1467 size = sizeof(*dl) + n * sizeof(*dr);
1468
1469 err = copy_to_user(arg, dl, size);
1470 kfree(dl);
1471
1472 return err ? -EFAULT : 0;
1473}
1474
1475int hci_get_dev_info(void __user *arg)
1476{
1477 struct hci_dev *hdev;
1478 struct hci_dev_info di;
1479 int err = 0;
1480
1481 if (copy_from_user(&di, arg, sizeof(di)))
1482 return -EFAULT;
1483
70f23020
AE
1484 hdev = hci_dev_get(di.dev_id);
1485 if (!hdev)
1da177e4
LT
1486 return -ENODEV;
1487
a8b2d5c2 1488 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1489 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1490
a8b2d5c2
JH
1491 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1492 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1493
1da177e4
LT
1494 strcpy(di.name, hdev->name);
1495 di.bdaddr = hdev->bdaddr;
943da25d 1496 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1497 di.flags = hdev->flags;
1498 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1499 if (lmp_bredr_capable(hdev)) {
1500 di.acl_mtu = hdev->acl_mtu;
1501 di.acl_pkts = hdev->acl_pkts;
1502 di.sco_mtu = hdev->sco_mtu;
1503 di.sco_pkts = hdev->sco_pkts;
1504 } else {
1505 di.acl_mtu = hdev->le_mtu;
1506 di.acl_pkts = hdev->le_pkts;
1507 di.sco_mtu = 0;
1508 di.sco_pkts = 0;
1509 }
1da177e4
LT
1510 di.link_policy = hdev->link_policy;
1511 di.link_mode = hdev->link_mode;
1512
1513 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1514 memcpy(&di.features, &hdev->features, sizeof(di.features));
1515
1516 if (copy_to_user(arg, &di, sizeof(di)))
1517 err = -EFAULT;
1518
1519 hci_dev_put(hdev);
1520
1521 return err;
1522}
1523
1524/* ---- Interface to HCI drivers ---- */
1525
611b30f7
MH
1526static int hci_rfkill_set_block(void *data, bool blocked)
1527{
1528 struct hci_dev *hdev = data;
1529
1530 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1531
1532 if (!blocked)
1533 return 0;
1534
1535 hci_dev_do_close(hdev);
1536
1537 return 0;
1538}
1539
1540static const struct rfkill_ops hci_rfkill_ops = {
1541 .set_block = hci_rfkill_set_block,
1542};
1543
ab81cbf9
JH
1544static void hci_power_on(struct work_struct *work)
1545{
1546 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1547
1548 BT_DBG("%s", hdev->name);
1549
1550 if (hci_dev_open(hdev->id) < 0)
1551 return;
1552
a8b2d5c2 1553 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
19202573
JH
1554 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1555 HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1556
a8b2d5c2 1557 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1558 mgmt_index_added(hdev);
ab81cbf9
JH
1559}
1560
1561static void hci_power_off(struct work_struct *work)
1562{
3243553f 1563 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1564 power_off.work);
ab81cbf9
JH
1565
1566 BT_DBG("%s", hdev->name);
1567
8ee56540 1568 hci_dev_do_close(hdev);
ab81cbf9
JH
1569}
1570
16ab91ab
JH
1571static void hci_discov_off(struct work_struct *work)
1572{
1573 struct hci_dev *hdev;
1574 u8 scan = SCAN_PAGE;
1575
1576 hdev = container_of(work, struct hci_dev, discov_off.work);
1577
1578 BT_DBG("%s", hdev->name);
1579
09fd0de5 1580 hci_dev_lock(hdev);
16ab91ab
JH
1581
1582 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1583
1584 hdev->discov_timeout = 0;
1585
09fd0de5 1586 hci_dev_unlock(hdev);
16ab91ab
JH
1587}
1588
2aeb9a1a
JH
1589int hci_uuids_clear(struct hci_dev *hdev)
1590{
4821002c 1591 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1592
4821002c
JH
1593 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1594 list_del(&uuid->list);
2aeb9a1a
JH
1595 kfree(uuid);
1596 }
1597
1598 return 0;
1599}
1600
55ed8ca1
JH
1601int hci_link_keys_clear(struct hci_dev *hdev)
1602{
1603 struct list_head *p, *n;
1604
1605 list_for_each_safe(p, n, &hdev->link_keys) {
1606 struct link_key *key;
1607
1608 key = list_entry(p, struct link_key, list);
1609
1610 list_del(p);
1611 kfree(key);
1612 }
1613
1614 return 0;
1615}
1616
b899efaf
VCG
1617int hci_smp_ltks_clear(struct hci_dev *hdev)
1618{
1619 struct smp_ltk *k, *tmp;
1620
1621 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1622 list_del(&k->list);
1623 kfree(k);
1624 }
1625
1626 return 0;
1627}
1628
55ed8ca1
JH
1629struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1630{
8035ded4 1631 struct link_key *k;
55ed8ca1 1632
8035ded4 1633 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1634 if (bacmp(bdaddr, &k->bdaddr) == 0)
1635 return k;
55ed8ca1
JH
1636
1637 return NULL;
1638}
1639
745c0ce3 1640static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1641 u8 key_type, u8 old_key_type)
d25e28ab
JH
1642{
1643 /* Legacy key */
1644 if (key_type < 0x03)
745c0ce3 1645 return true;
d25e28ab
JH
1646
1647 /* Debug keys are insecure so don't store them persistently */
1648 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1649 return false;
d25e28ab
JH
1650
1651 /* Changed combination key and there's no previous one */
1652 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1653 return false;
d25e28ab
JH
1654
1655 /* Security mode 3 case */
1656 if (!conn)
745c0ce3 1657 return true;
d25e28ab
JH
1658
1659 /* Neither local nor remote side had no-bonding as requirement */
1660 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1661 return true;
d25e28ab
JH
1662
1663 /* Local side had dedicated bonding as requirement */
1664 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1665 return true;
d25e28ab
JH
1666
1667 /* Remote side had dedicated bonding as requirement */
1668 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1669 return true;
d25e28ab
JH
1670
1671 /* If none of the above criteria match, then don't store the key
1672 * persistently */
745c0ce3 1673 return false;
d25e28ab
JH
1674}
1675
c9839a11 1676struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1677{
c9839a11 1678 struct smp_ltk *k;
75d262c2 1679
c9839a11
VCG
1680 list_for_each_entry(k, &hdev->long_term_keys, list) {
1681 if (k->ediv != ediv ||
a8c5fb1a 1682 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1683 continue;
1684
c9839a11 1685 return k;
75d262c2
VCG
1686 }
1687
1688 return NULL;
1689}
75d262c2 1690
c9839a11 1691struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1692 u8 addr_type)
75d262c2 1693{
c9839a11 1694 struct smp_ltk *k;
75d262c2 1695
c9839a11
VCG
1696 list_for_each_entry(k, &hdev->long_term_keys, list)
1697 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1698 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1699 return k;
1700
1701 return NULL;
1702}
75d262c2 1703
d25e28ab 1704int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1705 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1706{
1707 struct link_key *key, *old_key;
745c0ce3
VA
1708 u8 old_key_type;
1709 bool persistent;
55ed8ca1
JH
1710
1711 old_key = hci_find_link_key(hdev, bdaddr);
1712 if (old_key) {
1713 old_key_type = old_key->type;
1714 key = old_key;
1715 } else {
12adcf3a 1716 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1717 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1718 if (!key)
1719 return -ENOMEM;
1720 list_add(&key->list, &hdev->link_keys);
1721 }
1722
6ed93dc6 1723 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1724
d25e28ab
JH
1725 /* Some buggy controller combinations generate a changed
1726 * combination key for legacy pairing even when there's no
1727 * previous key */
1728 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1729 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1730 type = HCI_LK_COMBINATION;
655fe6ec
JH
1731 if (conn)
1732 conn->key_type = type;
1733 }
d25e28ab 1734
55ed8ca1 1735 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1736 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1737 key->pin_len = pin_len;
1738
b6020ba0 1739 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1740 key->type = old_key_type;
4748fed2
JH
1741 else
1742 key->type = type;
1743
4df378a1
JH
1744 if (!new_key)
1745 return 0;
1746
1747 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1748
744cf19e 1749 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1750
6ec5bcad
VA
1751 if (conn)
1752 conn->flush_key = !persistent;
55ed8ca1
JH
1753
1754 return 0;
1755}
1756
c9839a11 1757int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1758 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1759 ediv, u8 rand[8])
75d262c2 1760{
c9839a11 1761 struct smp_ltk *key, *old_key;
75d262c2 1762
c9839a11
VCG
1763 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1764 return 0;
75d262c2 1765
c9839a11
VCG
1766 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1767 if (old_key)
75d262c2 1768 key = old_key;
c9839a11
VCG
1769 else {
1770 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1771 if (!key)
1772 return -ENOMEM;
c9839a11 1773 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1774 }
1775
75d262c2 1776 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1777 key->bdaddr_type = addr_type;
1778 memcpy(key->val, tk, sizeof(key->val));
1779 key->authenticated = authenticated;
1780 key->ediv = ediv;
1781 key->enc_size = enc_size;
1782 key->type = type;
1783 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1784
c9839a11
VCG
1785 if (!new_key)
1786 return 0;
75d262c2 1787
261cc5aa
VCG
1788 if (type & HCI_SMP_LTK)
1789 mgmt_new_ltk(hdev, key, 1);
1790
75d262c2
VCG
1791 return 0;
1792}
1793
55ed8ca1
JH
1794int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1795{
1796 struct link_key *key;
1797
1798 key = hci_find_link_key(hdev, bdaddr);
1799 if (!key)
1800 return -ENOENT;
1801
6ed93dc6 1802 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1803
1804 list_del(&key->list);
1805 kfree(key);
1806
1807 return 0;
1808}
1809
b899efaf
VCG
1810int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1811{
1812 struct smp_ltk *k, *tmp;
1813
1814 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1815 if (bacmp(bdaddr, &k->bdaddr))
1816 continue;
1817
6ed93dc6 1818 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1819
1820 list_del(&k->list);
1821 kfree(k);
1822 }
1823
1824 return 0;
1825}
1826
6bd32326 1827/* HCI command timer function */
bda4f23a 1828static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1829{
1830 struct hci_dev *hdev = (void *) arg;
1831
bda4f23a
AE
1832 if (hdev->sent_cmd) {
1833 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1834 u16 opcode = __le16_to_cpu(sent->opcode);
1835
1836 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1837 } else {
1838 BT_ERR("%s command tx timeout", hdev->name);
1839 }
1840
6bd32326 1841 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1842 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1843}
1844
2763eda6 1845struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1846 bdaddr_t *bdaddr)
2763eda6
SJ
1847{
1848 struct oob_data *data;
1849
1850 list_for_each_entry(data, &hdev->remote_oob_data, list)
1851 if (bacmp(bdaddr, &data->bdaddr) == 0)
1852 return data;
1853
1854 return NULL;
1855}
1856
1857int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1858{
1859 struct oob_data *data;
1860
1861 data = hci_find_remote_oob_data(hdev, bdaddr);
1862 if (!data)
1863 return -ENOENT;
1864
6ed93dc6 1865 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1866
1867 list_del(&data->list);
1868 kfree(data);
1869
1870 return 0;
1871}
1872
1873int hci_remote_oob_data_clear(struct hci_dev *hdev)
1874{
1875 struct oob_data *data, *n;
1876
1877 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1878 list_del(&data->list);
1879 kfree(data);
1880 }
1881
1882 return 0;
1883}
1884
1885int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1886 u8 *randomizer)
2763eda6
SJ
1887{
1888 struct oob_data *data;
1889
1890 data = hci_find_remote_oob_data(hdev, bdaddr);
1891
1892 if (!data) {
1893 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1894 if (!data)
1895 return -ENOMEM;
1896
1897 bacpy(&data->bdaddr, bdaddr);
1898 list_add(&data->list, &hdev->remote_oob_data);
1899 }
1900
1901 memcpy(data->hash, hash, sizeof(data->hash));
1902 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1903
6ed93dc6 1904 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1905
1906 return 0;
1907}
1908
04124681 1909struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1910{
8035ded4 1911 struct bdaddr_list *b;
b2a66aad 1912
8035ded4 1913 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1914 if (bacmp(bdaddr, &b->bdaddr) == 0)
1915 return b;
b2a66aad
AJ
1916
1917 return NULL;
1918}
1919
1920int hci_blacklist_clear(struct hci_dev *hdev)
1921{
1922 struct list_head *p, *n;
1923
1924 list_for_each_safe(p, n, &hdev->blacklist) {
1925 struct bdaddr_list *b;
1926
1927 b = list_entry(p, struct bdaddr_list, list);
1928
1929 list_del(p);
1930 kfree(b);
1931 }
1932
1933 return 0;
1934}
1935
88c1fe4b 1936int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1937{
1938 struct bdaddr_list *entry;
b2a66aad
AJ
1939
1940 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1941 return -EBADF;
1942
5e762444
AJ
1943 if (hci_blacklist_lookup(hdev, bdaddr))
1944 return -EEXIST;
b2a66aad
AJ
1945
1946 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1947 if (!entry)
1948 return -ENOMEM;
b2a66aad
AJ
1949
1950 bacpy(&entry->bdaddr, bdaddr);
1951
1952 list_add(&entry->list, &hdev->blacklist);
1953
88c1fe4b 1954 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1955}
1956
88c1fe4b 1957int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1958{
1959 struct bdaddr_list *entry;
b2a66aad 1960
1ec918ce 1961 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1962 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1963
1964 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1965 if (!entry)
5e762444 1966 return -ENOENT;
b2a66aad
AJ
1967
1968 list_del(&entry->list);
1969 kfree(entry);
1970
88c1fe4b 1971 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1972}
1973
42c6b129 1974static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1975{
1976 struct le_scan_params *param = (struct le_scan_params *) opt;
1977 struct hci_cp_le_set_scan_param cp;
1978
1979 memset(&cp, 0, sizeof(cp));
1980 cp.type = param->type;
1981 cp.interval = cpu_to_le16(param->interval);
1982 cp.window = cpu_to_le16(param->window);
1983
42c6b129 1984 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1985}
1986
42c6b129 1987static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1988{
1989 struct hci_cp_le_set_scan_enable cp;
1990
1991 memset(&cp, 0, sizeof(cp));
76a388be 1992 cp.enable = LE_SCAN_ENABLE;
525e296a 1993 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
7ba8b4be 1994
42c6b129 1995 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1996}
1997
1998static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 1999 u16 window, int timeout)
7ba8b4be
AG
2000{
2001 long timeo = msecs_to_jiffies(3000);
2002 struct le_scan_params param;
2003 int err;
2004
2005 BT_DBG("%s", hdev->name);
2006
2007 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2008 return -EINPROGRESS;
2009
2010 param.type = type;
2011 param.interval = interval;
2012 param.window = window;
2013
2014 hci_req_lock(hdev);
2015
01178cd4
JH
2016 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2017 timeo);
7ba8b4be 2018 if (!err)
01178cd4 2019 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
7ba8b4be
AG
2020
2021 hci_req_unlock(hdev);
2022
2023 if (err < 0)
2024 return err;
2025
46818ed5 2026 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
b6c7515a 2027 timeout);
7ba8b4be
AG
2028
2029 return 0;
2030}
2031
7dbfac1d
AG
2032int hci_cancel_le_scan(struct hci_dev *hdev)
2033{
2034 BT_DBG("%s", hdev->name);
2035
2036 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2037 return -EALREADY;
2038
2039 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2040 struct hci_cp_le_set_scan_enable cp;
2041
2042 /* Send HCI command to disable LE Scan */
2043 memset(&cp, 0, sizeof(cp));
2044 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2045 }
2046
2047 return 0;
2048}
2049
7ba8b4be
AG
2050static void le_scan_disable_work(struct work_struct *work)
2051{
2052 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2053 le_scan_disable.work);
7ba8b4be
AG
2054 struct hci_cp_le_set_scan_enable cp;
2055
2056 BT_DBG("%s", hdev->name);
2057
2058 memset(&cp, 0, sizeof(cp));
2059
2060 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2061}
2062
28b75a89
AG
2063static void le_scan_work(struct work_struct *work)
2064{
2065 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2066 struct le_scan_params *param = &hdev->le_scan_params;
2067
2068 BT_DBG("%s", hdev->name);
2069
04124681
GP
2070 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2071 param->timeout);
28b75a89
AG
2072}
2073
2074int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 2075 int timeout)
28b75a89
AG
2076{
2077 struct le_scan_params *param = &hdev->le_scan_params;
2078
2079 BT_DBG("%s", hdev->name);
2080
f1550478
JH
2081 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2082 return -ENOTSUPP;
2083
28b75a89
AG
2084 if (work_busy(&hdev->le_scan))
2085 return -EINPROGRESS;
2086
2087 param->type = type;
2088 param->interval = interval;
2089 param->window = window;
2090 param->timeout = timeout;
2091
2092 queue_work(system_long_wq, &hdev->le_scan);
2093
2094 return 0;
2095}
2096
9be0dab7
DH
2097/* Alloc HCI device */
2098struct hci_dev *hci_alloc_dev(void)
2099{
2100 struct hci_dev *hdev;
2101
2102 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2103 if (!hdev)
2104 return NULL;
2105
b1b813d4
DH
2106 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2107 hdev->esco_type = (ESCO_HV1);
2108 hdev->link_mode = (HCI_LM_ACCEPT);
2109 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
2110 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2111 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 2112
b1b813d4
DH
2113 hdev->sniff_max_interval = 800;
2114 hdev->sniff_min_interval = 80;
2115
2116 mutex_init(&hdev->lock);
2117 mutex_init(&hdev->req_lock);
2118
2119 INIT_LIST_HEAD(&hdev->mgmt_pending);
2120 INIT_LIST_HEAD(&hdev->blacklist);
2121 INIT_LIST_HEAD(&hdev->uuids);
2122 INIT_LIST_HEAD(&hdev->link_keys);
2123 INIT_LIST_HEAD(&hdev->long_term_keys);
2124 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 2125 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
2126
2127 INIT_WORK(&hdev->rx_work, hci_rx_work);
2128 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2129 INIT_WORK(&hdev->tx_work, hci_tx_work);
2130 INIT_WORK(&hdev->power_on, hci_power_on);
2131 INIT_WORK(&hdev->le_scan, le_scan_work);
2132
b1b813d4
DH
2133 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2134 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2135 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2136
b1b813d4
DH
2137 skb_queue_head_init(&hdev->rx_q);
2138 skb_queue_head_init(&hdev->cmd_q);
2139 skb_queue_head_init(&hdev->raw_q);
2140
2141 init_waitqueue_head(&hdev->req_wait_q);
2142
bda4f23a 2143 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 2144
b1b813d4
DH
2145 hci_init_sysfs(hdev);
2146 discovery_init(hdev);
9be0dab7
DH
2147
2148 return hdev;
2149}
2150EXPORT_SYMBOL(hci_alloc_dev);
2151
2152/* Free HCI device */
2153void hci_free_dev(struct hci_dev *hdev)
2154{
9be0dab7
DH
2155 /* will free via device release */
2156 put_device(&hdev->dev);
2157}
2158EXPORT_SYMBOL(hci_free_dev);
2159
1da177e4
LT
2160/* Register HCI device */
2161int hci_register_dev(struct hci_dev *hdev)
2162{
b1b813d4 2163 int id, error;
1da177e4 2164
010666a1 2165 if (!hdev->open || !hdev->close)
1da177e4
LT
2166 return -EINVAL;
2167
08add513
MM
2168 /* Do not allow HCI_AMP devices to register at index 0,
2169 * so the index can be used as the AMP controller ID.
2170 */
3df92b31
SL
2171 switch (hdev->dev_type) {
2172 case HCI_BREDR:
2173 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2174 break;
2175 case HCI_AMP:
2176 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2177 break;
2178 default:
2179 return -EINVAL;
1da177e4 2180 }
8e87d142 2181
3df92b31
SL
2182 if (id < 0)
2183 return id;
2184
1da177e4
LT
2185 sprintf(hdev->name, "hci%d", id);
2186 hdev->id = id;
2d8b3a11
AE
2187
2188 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2189
3df92b31
SL
2190 write_lock(&hci_dev_list_lock);
2191 list_add(&hdev->list, &hci_dev_list);
f20d09d5 2192 write_unlock(&hci_dev_list_lock);
1da177e4 2193
32845eb1 2194 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
a8c5fb1a 2195 WQ_MEM_RECLAIM, 1);
33ca954d
DH
2196 if (!hdev->workqueue) {
2197 error = -ENOMEM;
2198 goto err;
2199 }
f48fd9c8 2200
6ead1bbc
JH
2201 hdev->req_workqueue = alloc_workqueue(hdev->name,
2202 WQ_HIGHPRI | WQ_UNBOUND |
2203 WQ_MEM_RECLAIM, 1);
2204 if (!hdev->req_workqueue) {
2205 destroy_workqueue(hdev->workqueue);
2206 error = -ENOMEM;
2207 goto err;
2208 }
2209
33ca954d
DH
2210 error = hci_add_sysfs(hdev);
2211 if (error < 0)
2212 goto err_wqueue;
1da177e4 2213
611b30f7 2214 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2215 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2216 hdev);
611b30f7
MH
2217 if (hdev->rfkill) {
2218 if (rfkill_register(hdev->rfkill) < 0) {
2219 rfkill_destroy(hdev->rfkill);
2220 hdev->rfkill = NULL;
2221 }
2222 }
2223
a8b2d5c2 2224 set_bit(HCI_SETUP, &hdev->dev_flags);
ce2be9ac
AE
2225
2226 if (hdev->dev_type != HCI_AMP)
2227 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2228
1da177e4 2229 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2230 hci_dev_hold(hdev);
1da177e4 2231
19202573 2232 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2233
1da177e4 2234 return id;
f48fd9c8 2235
33ca954d
DH
2236err_wqueue:
2237 destroy_workqueue(hdev->workqueue);
6ead1bbc 2238 destroy_workqueue(hdev->req_workqueue);
33ca954d 2239err:
3df92b31 2240 ida_simple_remove(&hci_index_ida, hdev->id);
f20d09d5 2241 write_lock(&hci_dev_list_lock);
f48fd9c8 2242 list_del(&hdev->list);
f20d09d5 2243 write_unlock(&hci_dev_list_lock);
f48fd9c8 2244
33ca954d 2245 return error;
1da177e4
LT
2246}
2247EXPORT_SYMBOL(hci_register_dev);
2248
2249/* Unregister HCI device */
59735631 2250void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 2251{
3df92b31 2252 int i, id;
ef222013 2253
c13854ce 2254 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 2255
94324962
JH
2256 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2257
3df92b31
SL
2258 id = hdev->id;
2259
f20d09d5 2260 write_lock(&hci_dev_list_lock);
1da177e4 2261 list_del(&hdev->list);
f20d09d5 2262 write_unlock(&hci_dev_list_lock);
1da177e4
LT
2263
2264 hci_dev_do_close(hdev);
2265
cd4c5391 2266 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
2267 kfree_skb(hdev->reassembly[i]);
2268
b9b5ef18
GP
2269 cancel_work_sync(&hdev->power_on);
2270
ab81cbf9 2271 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 2272 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 2273 hci_dev_lock(hdev);
744cf19e 2274 mgmt_index_removed(hdev);
09fd0de5 2275 hci_dev_unlock(hdev);
56e5cb86 2276 }
ab81cbf9 2277
2e58ef3e
JH
2278 /* mgmt_index_removed should take care of emptying the
2279 * pending list */
2280 BUG_ON(!list_empty(&hdev->mgmt_pending));
2281
1da177e4
LT
2282 hci_notify(hdev, HCI_DEV_UNREG);
2283
611b30f7
MH
2284 if (hdev->rfkill) {
2285 rfkill_unregister(hdev->rfkill);
2286 rfkill_destroy(hdev->rfkill);
2287 }
2288
ce242970 2289 hci_del_sysfs(hdev);
147e2d59 2290
f48fd9c8 2291 destroy_workqueue(hdev->workqueue);
6ead1bbc 2292 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 2293
09fd0de5 2294 hci_dev_lock(hdev);
e2e0cacb 2295 hci_blacklist_clear(hdev);
2aeb9a1a 2296 hci_uuids_clear(hdev);
55ed8ca1 2297 hci_link_keys_clear(hdev);
b899efaf 2298 hci_smp_ltks_clear(hdev);
2763eda6 2299 hci_remote_oob_data_clear(hdev);
09fd0de5 2300 hci_dev_unlock(hdev);
e2e0cacb 2301
dc946bd8 2302 hci_dev_put(hdev);
3df92b31
SL
2303
2304 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
2305}
2306EXPORT_SYMBOL(hci_unregister_dev);
2307
2308/* Suspend HCI device */
2309int hci_suspend_dev(struct hci_dev *hdev)
2310{
2311 hci_notify(hdev, HCI_DEV_SUSPEND);
2312 return 0;
2313}
2314EXPORT_SYMBOL(hci_suspend_dev);
2315
2316/* Resume HCI device */
2317int hci_resume_dev(struct hci_dev *hdev)
2318{
2319 hci_notify(hdev, HCI_DEV_RESUME);
2320 return 0;
2321}
2322EXPORT_SYMBOL(hci_resume_dev);
2323
76bca880
MH
2324/* Receive frame from HCI drivers */
2325int hci_recv_frame(struct sk_buff *skb)
2326{
2327 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2328 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2329 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2330 kfree_skb(skb);
2331 return -ENXIO;
2332 }
2333
d82603c6 2334 /* Incoming skb */
76bca880
MH
2335 bt_cb(skb)->incoming = 1;
2336
2337 /* Time stamp */
2338 __net_timestamp(skb);
2339
76bca880 2340 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2341 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2342
76bca880
MH
2343 return 0;
2344}
2345EXPORT_SYMBOL(hci_recv_frame);
2346
33e882a5 2347static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2348 int count, __u8 index)
33e882a5
SS
2349{
2350 int len = 0;
2351 int hlen = 0;
2352 int remain = count;
2353 struct sk_buff *skb;
2354 struct bt_skb_cb *scb;
2355
2356 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2357 index >= NUM_REASSEMBLY)
33e882a5
SS
2358 return -EILSEQ;
2359
2360 skb = hdev->reassembly[index];
2361
2362 if (!skb) {
2363 switch (type) {
2364 case HCI_ACLDATA_PKT:
2365 len = HCI_MAX_FRAME_SIZE;
2366 hlen = HCI_ACL_HDR_SIZE;
2367 break;
2368 case HCI_EVENT_PKT:
2369 len = HCI_MAX_EVENT_SIZE;
2370 hlen = HCI_EVENT_HDR_SIZE;
2371 break;
2372 case HCI_SCODATA_PKT:
2373 len = HCI_MAX_SCO_SIZE;
2374 hlen = HCI_SCO_HDR_SIZE;
2375 break;
2376 }
2377
1e429f38 2378 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2379 if (!skb)
2380 return -ENOMEM;
2381
2382 scb = (void *) skb->cb;
2383 scb->expect = hlen;
2384 scb->pkt_type = type;
2385
2386 skb->dev = (void *) hdev;
2387 hdev->reassembly[index] = skb;
2388 }
2389
2390 while (count) {
2391 scb = (void *) skb->cb;
89bb46d0 2392 len = min_t(uint, scb->expect, count);
33e882a5
SS
2393
2394 memcpy(skb_put(skb, len), data, len);
2395
2396 count -= len;
2397 data += len;
2398 scb->expect -= len;
2399 remain = count;
2400
2401 switch (type) {
2402 case HCI_EVENT_PKT:
2403 if (skb->len == HCI_EVENT_HDR_SIZE) {
2404 struct hci_event_hdr *h = hci_event_hdr(skb);
2405 scb->expect = h->plen;
2406
2407 if (skb_tailroom(skb) < scb->expect) {
2408 kfree_skb(skb);
2409 hdev->reassembly[index] = NULL;
2410 return -ENOMEM;
2411 }
2412 }
2413 break;
2414
2415 case HCI_ACLDATA_PKT:
2416 if (skb->len == HCI_ACL_HDR_SIZE) {
2417 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2418 scb->expect = __le16_to_cpu(h->dlen);
2419
2420 if (skb_tailroom(skb) < scb->expect) {
2421 kfree_skb(skb);
2422 hdev->reassembly[index] = NULL;
2423 return -ENOMEM;
2424 }
2425 }
2426 break;
2427
2428 case HCI_SCODATA_PKT:
2429 if (skb->len == HCI_SCO_HDR_SIZE) {
2430 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2431 scb->expect = h->dlen;
2432
2433 if (skb_tailroom(skb) < scb->expect) {
2434 kfree_skb(skb);
2435 hdev->reassembly[index] = NULL;
2436 return -ENOMEM;
2437 }
2438 }
2439 break;
2440 }
2441
2442 if (scb->expect == 0) {
2443 /* Complete frame */
2444
2445 bt_cb(skb)->pkt_type = type;
2446 hci_recv_frame(skb);
2447
2448 hdev->reassembly[index] = NULL;
2449 return remain;
2450 }
2451 }
2452
2453 return remain;
2454}
2455
ef222013
MH
2456int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2457{
f39a3c06
SS
2458 int rem = 0;
2459
ef222013
MH
2460 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2461 return -EILSEQ;
2462
da5f6c37 2463 while (count) {
1e429f38 2464 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2465 if (rem < 0)
2466 return rem;
ef222013 2467
f39a3c06
SS
2468 data += (count - rem);
2469 count = rem;
f81c6224 2470 }
ef222013 2471
f39a3c06 2472 return rem;
ef222013
MH
2473}
2474EXPORT_SYMBOL(hci_recv_fragment);
2475
99811510
SS
2476#define STREAM_REASSEMBLY 0
2477
2478int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2479{
2480 int type;
2481 int rem = 0;
2482
da5f6c37 2483 while (count) {
99811510
SS
2484 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2485
2486 if (!skb) {
2487 struct { char type; } *pkt;
2488
2489 /* Start of the frame */
2490 pkt = data;
2491 type = pkt->type;
2492
2493 data++;
2494 count--;
2495 } else
2496 type = bt_cb(skb)->pkt_type;
2497
1e429f38 2498 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2499 STREAM_REASSEMBLY);
99811510
SS
2500 if (rem < 0)
2501 return rem;
2502
2503 data += (count - rem);
2504 count = rem;
f81c6224 2505 }
99811510
SS
2506
2507 return rem;
2508}
2509EXPORT_SYMBOL(hci_recv_stream_fragment);
2510
1da177e4
LT
2511/* ---- Interface to upper protocols ---- */
2512
1da177e4
LT
2513int hci_register_cb(struct hci_cb *cb)
2514{
2515 BT_DBG("%p name %s", cb, cb->name);
2516
f20d09d5 2517 write_lock(&hci_cb_list_lock);
1da177e4 2518 list_add(&cb->list, &hci_cb_list);
f20d09d5 2519 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2520
2521 return 0;
2522}
2523EXPORT_SYMBOL(hci_register_cb);
2524
2525int hci_unregister_cb(struct hci_cb *cb)
2526{
2527 BT_DBG("%p name %s", cb, cb->name);
2528
f20d09d5 2529 write_lock(&hci_cb_list_lock);
1da177e4 2530 list_del(&cb->list);
f20d09d5 2531 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2532
2533 return 0;
2534}
2535EXPORT_SYMBOL(hci_unregister_cb);
2536
2537static int hci_send_frame(struct sk_buff *skb)
2538{
2539 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2540
2541 if (!hdev) {
2542 kfree_skb(skb);
2543 return -ENODEV;
2544 }
2545
0d48d939 2546 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2547
cd82e61c
MH
2548 /* Time stamp */
2549 __net_timestamp(skb);
1da177e4 2550
cd82e61c
MH
2551 /* Send copy to monitor */
2552 hci_send_to_monitor(hdev, skb);
2553
2554 if (atomic_read(&hdev->promisc)) {
2555 /* Send copy to the sockets */
470fe1b5 2556 hci_send_to_sock(hdev, skb);
1da177e4
LT
2557 }
2558
2559 /* Get rid of skb owner, prior to sending to the driver. */
2560 skb_orphan(skb);
2561
2562 return hdev->send(skb);
2563}
2564
3119ae95
JH
2565void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2566{
2567 skb_queue_head_init(&req->cmd_q);
2568 req->hdev = hdev;
5d73e034 2569 req->err = 0;
3119ae95
JH
2570}
2571
2572int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2573{
2574 struct hci_dev *hdev = req->hdev;
2575 struct sk_buff *skb;
2576 unsigned long flags;
2577
2578 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2579
5d73e034
AG
2580 /* If an error occured during request building, remove all HCI
2581 * commands queued on the HCI request queue.
2582 */
2583 if (req->err) {
2584 skb_queue_purge(&req->cmd_q);
2585 return req->err;
2586 }
2587
3119ae95
JH
2588 /* Do not allow empty requests */
2589 if (skb_queue_empty(&req->cmd_q))
382b0c39 2590 return -ENODATA;
3119ae95
JH
2591
2592 skb = skb_peek_tail(&req->cmd_q);
2593 bt_cb(skb)->req.complete = complete;
2594
2595 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2596 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2597 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2598
2599 queue_work(hdev->workqueue, &hdev->cmd_work);
2600
2601 return 0;
2602}
2603
1ca3a9d0 2604static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 2605 u32 plen, const void *param)
1da177e4
LT
2606{
2607 int len = HCI_COMMAND_HDR_SIZE + plen;
2608 struct hci_command_hdr *hdr;
2609 struct sk_buff *skb;
2610
1da177e4 2611 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
2612 if (!skb)
2613 return NULL;
1da177e4
LT
2614
2615 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2616 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2617 hdr->plen = plen;
2618
2619 if (plen)
2620 memcpy(skb_put(skb, plen), param, plen);
2621
2622 BT_DBG("skb len %d", skb->len);
2623
0d48d939 2624 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2625 skb->dev = (void *) hdev;
c78ae283 2626
1ca3a9d0
JH
2627 return skb;
2628}
2629
2630/* Send HCI command */
07dc93dd
JH
2631int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2632 const void *param)
1ca3a9d0
JH
2633{
2634 struct sk_buff *skb;
2635
2636 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2637
2638 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2639 if (!skb) {
2640 BT_ERR("%s no memory for command", hdev->name);
2641 return -ENOMEM;
2642 }
2643
11714b3d
JH
2644 /* Stand-alone HCI commands must be flaged as
2645 * single-command requests.
2646 */
2647 bt_cb(skb)->req.start = true;
2648
1da177e4 2649 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2650 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2651
2652 return 0;
2653}
1da177e4 2654
71c76a17 2655/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
2656void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2657 const void *param, u8 event)
71c76a17
JH
2658{
2659 struct hci_dev *hdev = req->hdev;
2660 struct sk_buff *skb;
2661
2662 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2663
34739c1e
AG
2664 /* If an error occured during request building, there is no point in
2665 * queueing the HCI command. We can simply return.
2666 */
2667 if (req->err)
2668 return;
2669
71c76a17
JH
2670 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2671 if (!skb) {
5d73e034
AG
2672 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2673 hdev->name, opcode);
2674 req->err = -ENOMEM;
e348fe6b 2675 return;
71c76a17
JH
2676 }
2677
2678 if (skb_queue_empty(&req->cmd_q))
2679 bt_cb(skb)->req.start = true;
2680
02350a72
JH
2681 bt_cb(skb)->req.event = event;
2682
71c76a17 2683 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
2684}
2685
07dc93dd
JH
2686void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2687 const void *param)
02350a72
JH
2688{
2689 hci_req_add_ev(req, opcode, plen, param, 0);
2690}
2691
1da177e4 2692/* Get data from the previously sent command */
a9de9248 2693void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2694{
2695 struct hci_command_hdr *hdr;
2696
2697 if (!hdev->sent_cmd)
2698 return NULL;
2699
2700 hdr = (void *) hdev->sent_cmd->data;
2701
a9de9248 2702 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2703 return NULL;
2704
f0e09510 2705 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2706
2707 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2708}
2709
2710/* Send ACL data */
2711static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2712{
2713 struct hci_acl_hdr *hdr;
2714 int len = skb->len;
2715
badff6d0
ACM
2716 skb_push(skb, HCI_ACL_HDR_SIZE);
2717 skb_reset_transport_header(skb);
9c70220b 2718 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2719 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2720 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2721}
2722
ee22be7e 2723static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2724 struct sk_buff *skb, __u16 flags)
1da177e4 2725{
ee22be7e 2726 struct hci_conn *conn = chan->conn;
1da177e4
LT
2727 struct hci_dev *hdev = conn->hdev;
2728 struct sk_buff *list;
2729
087bfd99
GP
2730 skb->len = skb_headlen(skb);
2731 skb->data_len = 0;
2732
2733 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2734
2735 switch (hdev->dev_type) {
2736 case HCI_BREDR:
2737 hci_add_acl_hdr(skb, conn->handle, flags);
2738 break;
2739 case HCI_AMP:
2740 hci_add_acl_hdr(skb, chan->handle, flags);
2741 break;
2742 default:
2743 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2744 return;
2745 }
087bfd99 2746
70f23020
AE
2747 list = skb_shinfo(skb)->frag_list;
2748 if (!list) {
1da177e4
LT
2749 /* Non fragmented */
2750 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2751
73d80deb 2752 skb_queue_tail(queue, skb);
1da177e4
LT
2753 } else {
2754 /* Fragmented */
2755 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2756
2757 skb_shinfo(skb)->frag_list = NULL;
2758
2759 /* Queue all fragments atomically */
af3e6359 2760 spin_lock(&queue->lock);
1da177e4 2761
73d80deb 2762 __skb_queue_tail(queue, skb);
e702112f
AE
2763
2764 flags &= ~ACL_START;
2765 flags |= ACL_CONT;
1da177e4
LT
2766 do {
2767 skb = list; list = list->next;
8e87d142 2768
1da177e4 2769 skb->dev = (void *) hdev;
0d48d939 2770 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2771 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2772
2773 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2774
73d80deb 2775 __skb_queue_tail(queue, skb);
1da177e4
LT
2776 } while (list);
2777
af3e6359 2778 spin_unlock(&queue->lock);
1da177e4 2779 }
73d80deb
LAD
2780}
2781
2782void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2783{
ee22be7e 2784 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 2785
f0e09510 2786 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb
LAD
2787
2788 skb->dev = (void *) hdev;
73d80deb 2789
ee22be7e 2790 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 2791
3eff45ea 2792 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2793}
1da177e4
LT
2794
2795/* Send SCO data */
0d861d8b 2796void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2797{
2798 struct hci_dev *hdev = conn->hdev;
2799 struct hci_sco_hdr hdr;
2800
2801 BT_DBG("%s len %d", hdev->name, skb->len);
2802
aca3192c 2803 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2804 hdr.dlen = skb->len;
2805
badff6d0
ACM
2806 skb_push(skb, HCI_SCO_HDR_SIZE);
2807 skb_reset_transport_header(skb);
9c70220b 2808 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2809
2810 skb->dev = (void *) hdev;
0d48d939 2811 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2812
1da177e4 2813 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2814 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2815}
1da177e4
LT
2816
2817/* ---- HCI TX task (outgoing data) ---- */
2818
2819/* HCI Connection scheduler */
6039aa73
GP
2820static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2821 int *quote)
1da177e4
LT
2822{
2823 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2824 struct hci_conn *conn = NULL, *c;
abc5de8f 2825 unsigned int num = 0, min = ~0;
1da177e4 2826
8e87d142 2827 /* We don't have to lock device here. Connections are always
1da177e4 2828 * added and removed with TX task disabled. */
bf4c6325
GP
2829
2830 rcu_read_lock();
2831
2832 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2833 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2834 continue;
769be974
MH
2835
2836 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2837 continue;
2838
1da177e4
LT
2839 num++;
2840
2841 if (c->sent < min) {
2842 min = c->sent;
2843 conn = c;
2844 }
52087a79
LAD
2845
2846 if (hci_conn_num(hdev, type) == num)
2847 break;
1da177e4
LT
2848 }
2849
bf4c6325
GP
2850 rcu_read_unlock();
2851
1da177e4 2852 if (conn) {
6ed58ec5
VT
2853 int cnt, q;
2854
2855 switch (conn->type) {
2856 case ACL_LINK:
2857 cnt = hdev->acl_cnt;
2858 break;
2859 case SCO_LINK:
2860 case ESCO_LINK:
2861 cnt = hdev->sco_cnt;
2862 break;
2863 case LE_LINK:
2864 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2865 break;
2866 default:
2867 cnt = 0;
2868 BT_ERR("Unknown link type");
2869 }
2870
2871 q = cnt / num;
1da177e4
LT
2872 *quote = q ? q : 1;
2873 } else
2874 *quote = 0;
2875
2876 BT_DBG("conn %p quote %d", conn, *quote);
2877 return conn;
2878}
2879
6039aa73 2880static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2881{
2882 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2883 struct hci_conn *c;
1da177e4 2884
bae1f5d9 2885 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2886
bf4c6325
GP
2887 rcu_read_lock();
2888
1da177e4 2889 /* Kill stalled connections */
bf4c6325 2890 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2891 if (c->type == type && c->sent) {
6ed93dc6
AE
2892 BT_ERR("%s killing stalled connection %pMR",
2893 hdev->name, &c->dst);
bed71748 2894 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2895 }
2896 }
bf4c6325
GP
2897
2898 rcu_read_unlock();
1da177e4
LT
2899}
2900
6039aa73
GP
2901static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2902 int *quote)
1da177e4 2903{
73d80deb
LAD
2904 struct hci_conn_hash *h = &hdev->conn_hash;
2905 struct hci_chan *chan = NULL;
abc5de8f 2906 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2907 struct hci_conn *conn;
73d80deb
LAD
2908 int cnt, q, conn_num = 0;
2909
2910 BT_DBG("%s", hdev->name);
2911
bf4c6325
GP
2912 rcu_read_lock();
2913
2914 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2915 struct hci_chan *tmp;
2916
2917 if (conn->type != type)
2918 continue;
2919
2920 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2921 continue;
2922
2923 conn_num++;
2924
8192edef 2925 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2926 struct sk_buff *skb;
2927
2928 if (skb_queue_empty(&tmp->data_q))
2929 continue;
2930
2931 skb = skb_peek(&tmp->data_q);
2932 if (skb->priority < cur_prio)
2933 continue;
2934
2935 if (skb->priority > cur_prio) {
2936 num = 0;
2937 min = ~0;
2938 cur_prio = skb->priority;
2939 }
2940
2941 num++;
2942
2943 if (conn->sent < min) {
2944 min = conn->sent;
2945 chan = tmp;
2946 }
2947 }
2948
2949 if (hci_conn_num(hdev, type) == conn_num)
2950 break;
2951 }
2952
bf4c6325
GP
2953 rcu_read_unlock();
2954
73d80deb
LAD
2955 if (!chan)
2956 return NULL;
2957
2958 switch (chan->conn->type) {
2959 case ACL_LINK:
2960 cnt = hdev->acl_cnt;
2961 break;
bd1eb66b
AE
2962 case AMP_LINK:
2963 cnt = hdev->block_cnt;
2964 break;
73d80deb
LAD
2965 case SCO_LINK:
2966 case ESCO_LINK:
2967 cnt = hdev->sco_cnt;
2968 break;
2969 case LE_LINK:
2970 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2971 break;
2972 default:
2973 cnt = 0;
2974 BT_ERR("Unknown link type");
2975 }
2976
2977 q = cnt / num;
2978 *quote = q ? q : 1;
2979 BT_DBG("chan %p quote %d", chan, *quote);
2980 return chan;
2981}
2982
02b20f0b
LAD
2983static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2984{
2985 struct hci_conn_hash *h = &hdev->conn_hash;
2986 struct hci_conn *conn;
2987 int num = 0;
2988
2989 BT_DBG("%s", hdev->name);
2990
bf4c6325
GP
2991 rcu_read_lock();
2992
2993 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2994 struct hci_chan *chan;
2995
2996 if (conn->type != type)
2997 continue;
2998
2999 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3000 continue;
3001
3002 num++;
3003
8192edef 3004 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3005 struct sk_buff *skb;
3006
3007 if (chan->sent) {
3008 chan->sent = 0;
3009 continue;
3010 }
3011
3012 if (skb_queue_empty(&chan->data_q))
3013 continue;
3014
3015 skb = skb_peek(&chan->data_q);
3016 if (skb->priority >= HCI_PRIO_MAX - 1)
3017 continue;
3018
3019 skb->priority = HCI_PRIO_MAX - 1;
3020
3021 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3022 skb->priority);
02b20f0b
LAD
3023 }
3024
3025 if (hci_conn_num(hdev, type) == num)
3026 break;
3027 }
bf4c6325
GP
3028
3029 rcu_read_unlock();
3030
02b20f0b
LAD
3031}
3032
b71d385a
AE
3033static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3034{
3035 /* Calculate count of blocks used by this packet */
3036 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3037}
3038
6039aa73 3039static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3040{
1da177e4
LT
3041 if (!test_bit(HCI_RAW, &hdev->flags)) {
3042 /* ACL tx timeout must be longer than maximum
3043 * link supervision timeout (40.9 seconds) */
63d2bc1b 3044 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3045 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3046 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3047 }
63d2bc1b 3048}
1da177e4 3049
6039aa73 3050static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3051{
3052 unsigned int cnt = hdev->acl_cnt;
3053 struct hci_chan *chan;
3054 struct sk_buff *skb;
3055 int quote;
3056
3057 __check_timeout(hdev, cnt);
04837f64 3058
73d80deb 3059 while (hdev->acl_cnt &&
a8c5fb1a 3060 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3061 u32 priority = (skb_peek(&chan->data_q))->priority;
3062 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3063 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3064 skb->len, skb->priority);
73d80deb 3065
ec1cce24
LAD
3066 /* Stop if priority has changed */
3067 if (skb->priority < priority)
3068 break;
3069
3070 skb = skb_dequeue(&chan->data_q);
3071
73d80deb 3072 hci_conn_enter_active_mode(chan->conn,
04124681 3073 bt_cb(skb)->force_active);
04837f64 3074
1da177e4
LT
3075 hci_send_frame(skb);
3076 hdev->acl_last_tx = jiffies;
3077
3078 hdev->acl_cnt--;
73d80deb
LAD
3079 chan->sent++;
3080 chan->conn->sent++;
1da177e4
LT
3081 }
3082 }
02b20f0b
LAD
3083
3084 if (cnt != hdev->acl_cnt)
3085 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3086}
3087
6039aa73 3088static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3089{
63d2bc1b 3090 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3091 struct hci_chan *chan;
3092 struct sk_buff *skb;
3093 int quote;
bd1eb66b 3094 u8 type;
b71d385a 3095
63d2bc1b 3096 __check_timeout(hdev, cnt);
b71d385a 3097
bd1eb66b
AE
3098 BT_DBG("%s", hdev->name);
3099
3100 if (hdev->dev_type == HCI_AMP)
3101 type = AMP_LINK;
3102 else
3103 type = ACL_LINK;
3104
b71d385a 3105 while (hdev->block_cnt > 0 &&
bd1eb66b 3106 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3107 u32 priority = (skb_peek(&chan->data_q))->priority;
3108 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3109 int blocks;
3110
3111 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3112 skb->len, skb->priority);
b71d385a
AE
3113
3114 /* Stop if priority has changed */
3115 if (skb->priority < priority)
3116 break;
3117
3118 skb = skb_dequeue(&chan->data_q);
3119
3120 blocks = __get_blocks(hdev, skb);
3121 if (blocks > hdev->block_cnt)
3122 return;
3123
3124 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3125 bt_cb(skb)->force_active);
b71d385a
AE
3126
3127 hci_send_frame(skb);
3128 hdev->acl_last_tx = jiffies;
3129
3130 hdev->block_cnt -= blocks;
3131 quote -= blocks;
3132
3133 chan->sent += blocks;
3134 chan->conn->sent += blocks;
3135 }
3136 }
3137
3138 if (cnt != hdev->block_cnt)
bd1eb66b 3139 hci_prio_recalculate(hdev, type);
b71d385a
AE
3140}
3141
6039aa73 3142static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3143{
3144 BT_DBG("%s", hdev->name);
3145
bd1eb66b
AE
3146 /* No ACL link over BR/EDR controller */
3147 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3148 return;
3149
3150 /* No AMP link over AMP controller */
3151 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3152 return;
3153
3154 switch (hdev->flow_ctl_mode) {
3155 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3156 hci_sched_acl_pkt(hdev);
3157 break;
3158
3159 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3160 hci_sched_acl_blk(hdev);
3161 break;
3162 }
3163}
3164
1da177e4 3165/* Schedule SCO */
6039aa73 3166static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3167{
3168 struct hci_conn *conn;
3169 struct sk_buff *skb;
3170 int quote;
3171
3172 BT_DBG("%s", hdev->name);
3173
52087a79
LAD
3174 if (!hci_conn_num(hdev, SCO_LINK))
3175 return;
3176
1da177e4
LT
3177 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3178 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3179 BT_DBG("skb %p len %d", skb, skb->len);
3180 hci_send_frame(skb);
3181
3182 conn->sent++;
3183 if (conn->sent == ~0)
3184 conn->sent = 0;
3185 }
3186 }
3187}
3188
6039aa73 3189static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3190{
3191 struct hci_conn *conn;
3192 struct sk_buff *skb;
3193 int quote;
3194
3195 BT_DBG("%s", hdev->name);
3196
52087a79
LAD
3197 if (!hci_conn_num(hdev, ESCO_LINK))
3198 return;
3199
8fc9ced3
GP
3200 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3201 &quote))) {
b6a0dc82
MH
3202 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3203 BT_DBG("skb %p len %d", skb, skb->len);
3204 hci_send_frame(skb);
3205
3206 conn->sent++;
3207 if (conn->sent == ~0)
3208 conn->sent = 0;
3209 }
3210 }
3211}
3212
6039aa73 3213static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3214{
73d80deb 3215 struct hci_chan *chan;
6ed58ec5 3216 struct sk_buff *skb;
02b20f0b 3217 int quote, cnt, tmp;
6ed58ec5
VT
3218
3219 BT_DBG("%s", hdev->name);
3220
52087a79
LAD
3221 if (!hci_conn_num(hdev, LE_LINK))
3222 return;
3223
6ed58ec5
VT
3224 if (!test_bit(HCI_RAW, &hdev->flags)) {
3225 /* LE tx timeout must be longer than maximum
3226 * link supervision timeout (40.9 seconds) */
bae1f5d9 3227 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3228 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3229 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3230 }
3231
3232 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3233 tmp = cnt;
73d80deb 3234 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3235 u32 priority = (skb_peek(&chan->data_q))->priority;
3236 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3237 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3238 skb->len, skb->priority);
6ed58ec5 3239
ec1cce24
LAD
3240 /* Stop if priority has changed */
3241 if (skb->priority < priority)
3242 break;
3243
3244 skb = skb_dequeue(&chan->data_q);
3245
6ed58ec5
VT
3246 hci_send_frame(skb);
3247 hdev->le_last_tx = jiffies;
3248
3249 cnt--;
73d80deb
LAD
3250 chan->sent++;
3251 chan->conn->sent++;
6ed58ec5
VT
3252 }
3253 }
73d80deb 3254
6ed58ec5
VT
3255 if (hdev->le_pkts)
3256 hdev->le_cnt = cnt;
3257 else
3258 hdev->acl_cnt = cnt;
02b20f0b
LAD
3259
3260 if (cnt != tmp)
3261 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3262}
3263
3eff45ea 3264static void hci_tx_work(struct work_struct *work)
1da177e4 3265{
3eff45ea 3266 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3267 struct sk_buff *skb;
3268
6ed58ec5 3269 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3270 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
3271
3272 /* Schedule queues and send stuff to HCI driver */
3273
3274 hci_sched_acl(hdev);
3275
3276 hci_sched_sco(hdev);
3277
b6a0dc82
MH
3278 hci_sched_esco(hdev);
3279
6ed58ec5
VT
3280 hci_sched_le(hdev);
3281
1da177e4
LT
3282 /* Send next queued raw (unknown type) packet */
3283 while ((skb = skb_dequeue(&hdev->raw_q)))
3284 hci_send_frame(skb);
1da177e4
LT
3285}
3286
25985edc 3287/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3288
3289/* ACL data packet */
6039aa73 3290static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3291{
3292 struct hci_acl_hdr *hdr = (void *) skb->data;
3293 struct hci_conn *conn;
3294 __u16 handle, flags;
3295
3296 skb_pull(skb, HCI_ACL_HDR_SIZE);
3297
3298 handle = __le16_to_cpu(hdr->handle);
3299 flags = hci_flags(handle);
3300 handle = hci_handle(handle);
3301
f0e09510 3302 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3303 handle, flags);
1da177e4
LT
3304
3305 hdev->stat.acl_rx++;
3306
3307 hci_dev_lock(hdev);
3308 conn = hci_conn_hash_lookup_handle(hdev, handle);
3309 hci_dev_unlock(hdev);
8e87d142 3310
1da177e4 3311 if (conn) {
65983fc7 3312 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3313
1da177e4 3314 /* Send to upper protocol */
686ebf28
UF
3315 l2cap_recv_acldata(conn, skb, flags);
3316 return;
1da177e4 3317 } else {
8e87d142 3318 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3319 hdev->name, handle);
1da177e4
LT
3320 }
3321
3322 kfree_skb(skb);
3323}
3324
3325/* SCO data packet */
6039aa73 3326static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3327{
3328 struct hci_sco_hdr *hdr = (void *) skb->data;
3329 struct hci_conn *conn;
3330 __u16 handle;
3331
3332 skb_pull(skb, HCI_SCO_HDR_SIZE);
3333
3334 handle = __le16_to_cpu(hdr->handle);
3335
f0e09510 3336 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3337
3338 hdev->stat.sco_rx++;
3339
3340 hci_dev_lock(hdev);
3341 conn = hci_conn_hash_lookup_handle(hdev, handle);
3342 hci_dev_unlock(hdev);
3343
3344 if (conn) {
1da177e4 3345 /* Send to upper protocol */
686ebf28
UF
3346 sco_recv_scodata(conn, skb);
3347 return;
1da177e4 3348 } else {
8e87d142 3349 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3350 hdev->name, handle);
1da177e4
LT
3351 }
3352
3353 kfree_skb(skb);
3354}
3355
9238f36a
JH
3356static bool hci_req_is_complete(struct hci_dev *hdev)
3357{
3358 struct sk_buff *skb;
3359
3360 skb = skb_peek(&hdev->cmd_q);
3361 if (!skb)
3362 return true;
3363
3364 return bt_cb(skb)->req.start;
3365}
3366
42c6b129
JH
3367static void hci_resend_last(struct hci_dev *hdev)
3368{
3369 struct hci_command_hdr *sent;
3370 struct sk_buff *skb;
3371 u16 opcode;
3372
3373 if (!hdev->sent_cmd)
3374 return;
3375
3376 sent = (void *) hdev->sent_cmd->data;
3377 opcode = __le16_to_cpu(sent->opcode);
3378 if (opcode == HCI_OP_RESET)
3379 return;
3380
3381 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3382 if (!skb)
3383 return;
3384
3385 skb_queue_head(&hdev->cmd_q, skb);
3386 queue_work(hdev->workqueue, &hdev->cmd_work);
3387}
3388
/* Handle completion of the command identified by @opcode with the given
 * @status, and run the request-complete callback once the whole request
 * (a chain of queued commands) has finished or failed.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request.
	 * The queue lock is taken irqsave because the queue can also be
	 * touched from other contexts; commands are flushed up to (but
	 * not including) the start marker of the next request.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* First command of the next request: put it back */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The last flushed command's callback (if any) wins */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	/* Invoke the callback outside the queue lock */
	if (req_complete)
		req_complete(hdev, status);
}
3446
/* RX work handler: drain hdev->rx_q and dispatch each frame to the
 * monitor, raw sockets, and the appropriate protocol handler based on
 * its packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the frame is consumed here and never
		 * reaches the protocol handlers below.
		 */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
3501
/* Command work handler: send the next queued HCI command when the
 * controller has credits (hdev->cmd_cnt) available, and arm the command
 * timeout timer for it.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before replacing it */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the completion path can inspect what
		 * was sent (and hci_resend_last() can replay it).
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
3533
3534int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3535{
3536 /* General inquiry access code (GIAC) */
3537 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3538 struct hci_cp_inquiry cp;
3539
3540 BT_DBG("%s", hdev->name);
3541
3542 if (test_bit(HCI_INQUIRY, &hdev->flags))
3543 return -EINPROGRESS;
3544
4663262c
JH
3545 inquiry_cache_flush(hdev);
3546
2519a1fc
AG
3547 memset(&cp, 0, sizeof(cp));
3548 memcpy(&cp.lap, lap, sizeof(cp.lap));
3549 cp.length = length;
3550
3551 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3552}
023d5049
AG
3553
3554int hci_cancel_inquiry(struct hci_dev *hdev)
3555{
3556 BT_DBG("%s", hdev->name);
3557
3558 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3559 return -EALREADY;
023d5049
AG
3560
3561 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3562}
31f7956c
AG
3563
3564u8 bdaddr_to_le(u8 bdaddr_type)
3565{
3566 switch (bdaddr_type) {
3567 case BDADDR_LE_PUBLIC:
3568 return ADDR_LE_DEV_PUBLIC;
3569
3570 default:
3571 /* Fallback to LE Random address type */
3572 return ADDR_LE_DEV_RANDOM;
3573 }
3574}