/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

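/* Illustrative sketch (not called anywhere in this file): a driver could
 * use __hci_cmd_sync() to read the local version synchronously during
 * setup. The returned skb must be checked with IS_ERR() and freed by the
 * caller:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data now holds struct hci_rp_read_local_version
 *	kfree_skb(skb);
 */
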
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

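/* Illustrative sketch: callers pass a builder function that queues one or
 * more commands on the request; hci_req_sync() then runs the request and
 * sleeps until the final command completes. E.g. the HCISETSCAN ioctl
 * below effectively does:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 *
 * where hci_scan_req() adds a single HCI_OP_WRITE_SCAN_ENABLE command.
 */
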
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

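/* Editorial note: the return value above is the parameter of the Write
 * Inquiry Mode command: 0x00 = standard inquiry results, 0x01 = inquiry
 * results with RSSI, 0x02 = extended inquiry results. The manufacturer/
 * revision special cases appear to cover controllers known to support
 * RSSI results without advertising the corresponding LMP feature bit.
 */
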
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

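/* Editorial note: events[] is the little-endian 64-bit parameter of the
 * HCI Set Event Mask command; byte n carries mask bits 8*n..8*n+7. For
 * example, "events[4] |= 0x02" sets bit 33 (byte 4, bit 1), which the
 * Bluetooth core specification assigns to the Inquiry Result with RSSI
 * event.
 */
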
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode BR/EDR, single-mode LE and dual-mode
	 * BR/EDR/LE controllers. AMP controllers only need the first stage
	 * init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

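/* Editorial summary of the three init stages run above: stage 1 resets
 * the controller and reads basic identity (features, version, address);
 * stage 2 configures BR/EDR and LE basics plus the event mask, once the
 * feature bits from stage 1 are known; stage 3 applies settings that
 * depend on the supported-commands bitmap and host policy (link policy,
 * LE host support, advertising data).
 */
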
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

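/* Editorial note on the discovery cache layout: every discovered device
 * sits on cache->all via inquiry_entry.all; entries whose remote name is
 * still unresolved are additionally linked on cache->unknown, and entries
 * queued for name resolution sit on cache->resolve, kept sorted by RSSI
 * magnitude so the strongest (closest) devices are resolved first. The
 * return value of hci_inquiry_cache_update() tells the caller whether the
 * remote name is already known.
 */
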
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR
		 * (through done, so the device reference taken above is
		 * dropped).
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

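/* Illustrative sketch (userspace, hypothetical names): the HCIINQUIRY
 * ioctl above is driven from a buffer holding struct hci_inquiry_req
 * followed by room for the inquiry_info results, roughly:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 0,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *	// buf.ir.num_rsp now holds the number of entries in buf.info,
 *	// with hci_sock_fd being an open BTPROTO_HCI socket.
 */
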
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

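/* Editorial note: advertising data is a sequence of length/type/value
 * structures, where the length byte counts the type byte plus the value.
 * A payload built above might look like (hex):
 *
 *	02 01 06            Flags: LE General Discoverable, no BR/EDR
 *	02 0a 00            TX Power Level: 0 dBm
 *	05 09 42 6c 75 65   Complete Local Name: "Blue"
 */
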
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

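/* Editorial note: for HCISETACLMTU/HCISETSCOMTU, dev_opt packs two 16-bit
 * values into one 32-bit word. On a little-endian host, the low halfword
 * ((__u16 *) &dr.dev_opt + 0) is the packet count and the high halfword
 * (+ 1) is the MTU, i.e. dev_opt == (mtu << 16) | pkt_count.
 */
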
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

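/* Editorial note: auth_type/remote_auth carry the authentication
 * requirements from the IO Capability exchange: 0x00/0x01 mean no
 * bonding, 0x02/0x03 dedicated bonding, 0x04/0x05 general bonding (odd
 * values additionally require MITM protection). Hence a key is stored
 * persistently when both sides requested some form of bonding, or when
 * either side required dedicated bonding; otherwise the pairing is
 * treated as temporary.
 */
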
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

1746
c9839a11 1747int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1748 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1749 ediv, u8 rand[8])
75d262c2 1750{
c9839a11 1751 struct smp_ltk *key, *old_key;
75d262c2 1752
c9839a11
VCG
1753 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1754 return 0;
75d262c2 1755
c9839a11
VCG
1756 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1757 if (old_key)
75d262c2 1758 key = old_key;
c9839a11
VCG
1759 else {
1760 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1761 if (!key)
1762 return -ENOMEM;
c9839a11 1763 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1764 }
1765
75d262c2 1766 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1767 key->bdaddr_type = addr_type;
1768 memcpy(key->val, tk, sizeof(key->val));
1769 key->authenticated = authenticated;
1770 key->ediv = ediv;
1771 key->enc_size = enc_size;
1772 key->type = type;
1773 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1774
c9839a11
VCG
1775 if (!new_key)
1776 return 0;
75d262c2 1777
261cc5aa
VCG
1778 if (type & HCI_SMP_LTK)
1779 mgmt_new_ltk(hdev, key, 1);
1780
75d262c2
VCG
1781 return 0;
1782}
1783
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

42c6b129 1964static void le_scan_param_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1965{
1966 struct le_scan_params *param = (struct le_scan_params *) opt;
1967 struct hci_cp_le_set_scan_param cp;
1968
1969 memset(&cp, 0, sizeof(cp));
1970 cp.type = param->type;
1971 cp.interval = cpu_to_le16(param->interval);
1972 cp.window = cpu_to_le16(param->window);
1973
42c6b129 1974 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
7ba8b4be
AG
1975}
1976
42c6b129 1977static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
7ba8b4be
AG
1978{
1979 struct hci_cp_le_set_scan_enable cp;
1980
1981 memset(&cp, 0, sizeof(cp));
1982 cp.enable = 1;
0431a43c 1983 cp.filter_dup = 1;
7ba8b4be 1984
42c6b129 1985 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
7ba8b4be
AG
1986}
1987
1988static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 1989 u16 window, int timeout)
7ba8b4be
AG
1990{
1991 long timeo = msecs_to_jiffies(3000);
1992 struct le_scan_params param;
1993 int err;
1994
1995 BT_DBG("%s", hdev->name);
1996
1997 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1998 return -EINPROGRESS;
1999
2000 param.type = type;
2001 param.interval = interval;
2002 param.window = window;
2003
2004 hci_req_lock(hdev);
2005
01178cd4
JH
2006 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2007 timeo);
7ba8b4be 2008 if (!err)
01178cd4 2009 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
7ba8b4be
AG
2010
2011 hci_req_unlock(hdev);
2012
2013 if (err < 0)
2014 return err;
2015
46818ed5
JH
2016 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2017 msecs_to_jiffies(timeout));
7ba8b4be
AG
2018
2019 return 0;
2020}
2021
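/*
 * Note on the flow above: hci_do_le_scan() issues two synchronous
 * requests back to back, scan parameters first and then scan enable,
 * and only if both succeed does it arm the le_scan_disable delayed
 * work, which sends the disabling command once the caller-supplied
 * timeout (in milliseconds) expires.
 */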
7dbfac1d
AG
2022int hci_cancel_le_scan(struct hci_dev *hdev)
2023{
2024 BT_DBG("%s", hdev->name);
2025
2026 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2027 return -EALREADY;
2028
2029 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2030 struct hci_cp_le_set_scan_enable cp;
2031
2032 /* Send HCI command to disable LE Scan */
2033 memset(&cp, 0, sizeof(cp));
2034 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2035 }
2036
2037 return 0;
2038}
2039
7ba8b4be
AG
2040static void le_scan_disable_work(struct work_struct *work)
2041{
2042 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2043 le_scan_disable.work);
7ba8b4be
AG
2044 struct hci_cp_le_set_scan_enable cp;
2045
2046 BT_DBG("%s", hdev->name);
2047
2048 memset(&cp, 0, sizeof(cp));
2049
2050 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2051}
2052
28b75a89
AG
2053static void le_scan_work(struct work_struct *work)
2054{
2055 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2056 struct le_scan_params *param = &hdev->le_scan_params;
2057
2058 BT_DBG("%s", hdev->name);
2059
04124681
GP
2060 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2061 param->timeout);
28b75a89
AG
2062}
2063
2064int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 2065 int timeout)
28b75a89
AG
2066{
2067 struct le_scan_params *param = &hdev->le_scan_params;
2068
2069 BT_DBG("%s", hdev->name);
2070
f1550478
JH
2071 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2072 return -ENOTSUPP;
2073
28b75a89
AG
2074 if (work_busy(&hdev->le_scan))
2075 return -EINPROGRESS;
2076
2077 param->type = type;
2078 param->interval = interval;
2079 param->window = window;
2080 param->timeout = timeout;
2081
2082 queue_work(system_long_wq, &hdev->le_scan);
2083
2084 return 0;
2085}
2086
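/*
 * Minimal usage sketch (the parameter values are illustrative
 * assumptions): start an active LE scan and let it auto-disable after
 * 10 seconds. Interval and window are in controller units of 0.625 ms.
 *
 *	err = hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10000);
 *	...
 *	err = hci_cancel_le_scan(hdev);
 */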
9be0dab7
DH
2087/* Alloc HCI device */
2088struct hci_dev *hci_alloc_dev(void)
2089{
2090 struct hci_dev *hdev;
2091
2092 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2093 if (!hdev)
2094 return NULL;
2095
b1b813d4
DH
2096 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2097 hdev->esco_type = (ESCO_HV1);
2098 hdev->link_mode = (HCI_LM_ACCEPT);
2099 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
2100 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2101 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 2102
b1b813d4
DH
2103 hdev->sniff_max_interval = 800;
2104 hdev->sniff_min_interval = 80;
2105
2106 mutex_init(&hdev->lock);
2107 mutex_init(&hdev->req_lock);
2108
2109 INIT_LIST_HEAD(&hdev->mgmt_pending);
2110 INIT_LIST_HEAD(&hdev->blacklist);
2111 INIT_LIST_HEAD(&hdev->uuids);
2112 INIT_LIST_HEAD(&hdev->link_keys);
2113 INIT_LIST_HEAD(&hdev->long_term_keys);
2114 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 2115 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
2116
2117 INIT_WORK(&hdev->rx_work, hci_rx_work);
2118 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2119 INIT_WORK(&hdev->tx_work, hci_tx_work);
2120 INIT_WORK(&hdev->power_on, hci_power_on);
2121 INIT_WORK(&hdev->le_scan, le_scan_work);
2122
b1b813d4
DH
2123 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2124 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2125 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2126
b1b813d4
DH
2127 skb_queue_head_init(&hdev->rx_q);
2128 skb_queue_head_init(&hdev->cmd_q);
2129 skb_queue_head_init(&hdev->raw_q);
2130
2131 init_waitqueue_head(&hdev->req_wait_q);
2132
bda4f23a 2133 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 2134
b1b813d4
DH
2135 hci_init_sysfs(hdev);
2136 discovery_init(hdev);
9be0dab7
DH
2137
2138 return hdev;
2139}
2140EXPORT_SYMBOL(hci_alloc_dev);
2141
2142/* Free HCI device */
2143void hci_free_dev(struct hci_dev *hdev)
2144{
9be0dab7
DH
2145 /* will be freed via device release */
2146 put_device(&hdev->dev);
2147}
2148EXPORT_SYMBOL(hci_free_dev);
2149
1da177e4
LT
2150/* Register HCI device */
2151int hci_register_dev(struct hci_dev *hdev)
2152{
b1b813d4 2153 int id, error;
1da177e4 2154
010666a1 2155 if (!hdev->open || !hdev->close)
1da177e4
LT
2156 return -EINVAL;
2157
08add513
MM
2158 /* Do not allow HCI_AMP devices to register at index 0,
2159 * so the index can be used as the AMP controller ID.
2160 */
3df92b31
SL
2161 switch (hdev->dev_type) {
2162 case HCI_BREDR:
2163 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2164 break;
2165 case HCI_AMP:
2166 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2167 break;
2168 default:
2169 return -EINVAL;
1da177e4 2170 }
8e87d142 2171
3df92b31
SL
2172 if (id < 0)
2173 return id;
2174
1da177e4
LT
2175 sprintf(hdev->name, "hci%d", id);
2176 hdev->id = id;
2d8b3a11
AE
2177
2178 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2179
3df92b31
SL
2180 write_lock(&hci_dev_list_lock);
2181 list_add(&hdev->list, &hci_dev_list);
f20d09d5 2182 write_unlock(&hci_dev_list_lock);
1da177e4 2183
32845eb1 2184 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
a8c5fb1a 2185 WQ_MEM_RECLAIM, 1);
33ca954d
DH
2186 if (!hdev->workqueue) {
2187 error = -ENOMEM;
2188 goto err;
2189 }
f48fd9c8 2190
6ead1bbc
JH
2191 hdev->req_workqueue = alloc_workqueue(hdev->name,
2192 WQ_HIGHPRI | WQ_UNBOUND |
2193 WQ_MEM_RECLAIM, 1);
2194 if (!hdev->req_workqueue) {
2195 destroy_workqueue(hdev->workqueue);
2196 error = -ENOMEM;
2197 goto err;
2198 }
2199
33ca954d
DH
2200 error = hci_add_sysfs(hdev);
2201 if (error < 0)
2202 goto err_wqueue;
1da177e4 2203
611b30f7 2204 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2205 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2206 hdev);
611b30f7
MH
2207 if (hdev->rfkill) {
2208 if (rfkill_register(hdev->rfkill) < 0) {
2209 rfkill_destroy(hdev->rfkill);
2210 hdev->rfkill = NULL;
2211 }
2212 }
2213
a8b2d5c2 2214 set_bit(HCI_SETUP, &hdev->dev_flags);
ce2be9ac
AE
2215
2216 if (hdev->dev_type != HCI_AMP)
2217 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2218
1da177e4 2219 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2220 hci_dev_hold(hdev);
1da177e4 2221
19202573 2222 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2223
1da177e4 2224 return id;
f48fd9c8 2225
33ca954d
DH
2226err_wqueue:
2227 destroy_workqueue(hdev->workqueue);
6ead1bbc 2228 destroy_workqueue(hdev->req_workqueue);
33ca954d 2229err:
3df92b31 2230 ida_simple_remove(&hci_index_ida, hdev->id);
f20d09d5 2231 write_lock(&hci_dev_list_lock);
f48fd9c8 2232 list_del(&hdev->list);
f20d09d5 2233 write_unlock(&hci_dev_list_lock);
f48fd9c8 2234
33ca954d 2235 return error;
1da177e4
LT
2236}
2237EXPORT_SYMBOL(hci_register_dev);
2238
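/*
 * Illustrative driver probe sketch (my_open/my_close/my_send are
 * hypothetical driver callbacks; error handling trimmed). Registration
 * picks the next free index and queues the power_on work:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */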
2239/* Unregister HCI device */
59735631 2240void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 2241{
3df92b31 2242 int i, id;
ef222013 2243
c13854ce 2244 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 2245
94324962
JH
2246 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2247
3df92b31
SL
2248 id = hdev->id;
2249
f20d09d5 2250 write_lock(&hci_dev_list_lock);
1da177e4 2251 list_del(&hdev->list);
f20d09d5 2252 write_unlock(&hci_dev_list_lock);
1da177e4
LT
2253
2254 hci_dev_do_close(hdev);
2255
cd4c5391 2256 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
2257 kfree_skb(hdev->reassembly[i]);
2258
b9b5ef18
GP
2259 cancel_work_sync(&hdev->power_on);
2260
ab81cbf9 2261 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 2262 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 2263 hci_dev_lock(hdev);
744cf19e 2264 mgmt_index_removed(hdev);
09fd0de5 2265 hci_dev_unlock(hdev);
56e5cb86 2266 }
ab81cbf9 2267
2e58ef3e
JH
2268 /* mgmt_index_removed should take care of emptying the
2269 * pending list */
2270 BUG_ON(!list_empty(&hdev->mgmt_pending));
2271
1da177e4
LT
2272 hci_notify(hdev, HCI_DEV_UNREG);
2273
611b30f7
MH
2274 if (hdev->rfkill) {
2275 rfkill_unregister(hdev->rfkill);
2276 rfkill_destroy(hdev->rfkill);
2277 }
2278
ce242970 2279 hci_del_sysfs(hdev);
147e2d59 2280
f48fd9c8 2281 destroy_workqueue(hdev->workqueue);
6ead1bbc 2282 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 2283
09fd0de5 2284 hci_dev_lock(hdev);
e2e0cacb 2285 hci_blacklist_clear(hdev);
2aeb9a1a 2286 hci_uuids_clear(hdev);
55ed8ca1 2287 hci_link_keys_clear(hdev);
b899efaf 2288 hci_smp_ltks_clear(hdev);
2763eda6 2289 hci_remote_oob_data_clear(hdev);
09fd0de5 2290 hci_dev_unlock(hdev);
e2e0cacb 2291
dc946bd8 2292 hci_dev_put(hdev);
3df92b31
SL
2293
2294 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
2295}
2296EXPORT_SYMBOL(hci_unregister_dev);
2297
2298/* Suspend HCI device */
2299int hci_suspend_dev(struct hci_dev *hdev)
2300{
2301 hci_notify(hdev, HCI_DEV_SUSPEND);
2302 return 0;
2303}
2304EXPORT_SYMBOL(hci_suspend_dev);
2305
2306/* Resume HCI device */
2307int hci_resume_dev(struct hci_dev *hdev)
2308{
2309 hci_notify(hdev, HCI_DEV_RESUME);
2310 return 0;
2311}
2312EXPORT_SYMBOL(hci_resume_dev);
2313
76bca880
MH
2314/* Receive frame from HCI drivers */
2315int hci_recv_frame(struct sk_buff *skb)
2316{
2317 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2318 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2319 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2320 kfree_skb(skb);
2321 return -ENXIO;
2322 }
2323
d82603c6 2324 /* Incoming skb */
76bca880
MH
2325 bt_cb(skb)->incoming = 1;
2326
2327 /* Time stamp */
2328 __net_timestamp(skb);
2329
76bca880 2330 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2331 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2332
76bca880
MH
2333 return 0;
2334}
2335EXPORT_SYMBOL(hci_recv_frame);
2336
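/*
 * Sketch of the driver side (assumed context: a completed transfer
 * already wrapped in an skb). The driver tags the packet and hands it
 * to hci_recv_frame(), which queues it for hci_rx_work():
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */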
33e882a5 2337static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2338 int count, __u8 index)
33e882a5
SS
2339{
2340 int len = 0;
2341 int hlen = 0;
2342 int remain = count;
2343 struct sk_buff *skb;
2344 struct bt_skb_cb *scb;
2345
2346 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2347 index >= NUM_REASSEMBLY)
33e882a5
SS
2348 return -EILSEQ;
2349
2350 skb = hdev->reassembly[index];
2351
2352 if (!skb) {
2353 switch (type) {
2354 case HCI_ACLDATA_PKT:
2355 len = HCI_MAX_FRAME_SIZE;
2356 hlen = HCI_ACL_HDR_SIZE;
2357 break;
2358 case HCI_EVENT_PKT:
2359 len = HCI_MAX_EVENT_SIZE;
2360 hlen = HCI_EVENT_HDR_SIZE;
2361 break;
2362 case HCI_SCODATA_PKT:
2363 len = HCI_MAX_SCO_SIZE;
2364 hlen = HCI_SCO_HDR_SIZE;
2365 break;
2366 }
2367
1e429f38 2368 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2369 if (!skb)
2370 return -ENOMEM;
2371
2372 scb = (void *) skb->cb;
2373 scb->expect = hlen;
2374 scb->pkt_type = type;
2375
2376 skb->dev = (void *) hdev;
2377 hdev->reassembly[index] = skb;
2378 }
2379
2380 while (count) {
2381 scb = (void *) skb->cb;
89bb46d0 2382 len = min_t(uint, scb->expect, count);
33e882a5
SS
2383
2384 memcpy(skb_put(skb, len), data, len);
2385
2386 count -= len;
2387 data += len;
2388 scb->expect -= len;
2389 remain = count;
2390
2391 switch (type) {
2392 case HCI_EVENT_PKT:
2393 if (skb->len == HCI_EVENT_HDR_SIZE) {
2394 struct hci_event_hdr *h = hci_event_hdr(skb);
2395 scb->expect = h->plen;
2396
2397 if (skb_tailroom(skb) < scb->expect) {
2398 kfree_skb(skb);
2399 hdev->reassembly[index] = NULL;
2400 return -ENOMEM;
2401 }
2402 }
2403 break;
2404
2405 case HCI_ACLDATA_PKT:
2406 if (skb->len == HCI_ACL_HDR_SIZE) {
2407 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2408 scb->expect = __le16_to_cpu(h->dlen);
2409
2410 if (skb_tailroom(skb) < scb->expect) {
2411 kfree_skb(skb);
2412 hdev->reassembly[index] = NULL;
2413 return -ENOMEM;
2414 }
2415 }
2416 break;
2417
2418 case HCI_SCODATA_PKT:
2419 if (skb->len == HCI_SCO_HDR_SIZE) {
2420 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2421 scb->expect = h->dlen;
2422
2423 if (skb_tailroom(skb) < scb->expect) {
2424 kfree_skb(skb);
2425 hdev->reassembly[index] = NULL;
2426 return -ENOMEM;
2427 }
2428 }
2429 break;
2430 }
2431
2432 if (scb->expect == 0) {
2433 /* Complete frame */
2434
2435 bt_cb(skb)->pkt_type = type;
2436 hci_recv_frame(skb);
2437
2438 hdev->reassembly[index] = NULL;
2439 return remain;
2440 }
2441 }
2442
2443 return remain;
2444}
2445
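/*
 * Return convention of hci_reassembly(): a negative value on error,
 * otherwise the number of input bytes it did not consume. A complete
 * frame is pushed to hci_recv_frame() as soon as scb->expect drops to
 * zero, so callers simply loop, advancing their buffer by
 * (count - rem) until everything has been consumed.
 */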
ef222013
MH
2446int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2447{
f39a3c06
SS
2448 int rem = 0;
2449
ef222013
MH
2450 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2451 return -EILSEQ;
2452
da5f6c37 2453 while (count) {
1e429f38 2454 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2455 if (rem < 0)
2456 return rem;
ef222013 2457
f39a3c06
SS
2458 data += (count - rem);
2459 count = rem;
f81c6224 2460 }
ef222013 2461
f39a3c06 2462 return rem;
ef222013
MH
2463}
2464EXPORT_SYMBOL(hci_recv_fragment);
2465
99811510
SS
2466#define STREAM_REASSEMBLY 0
2467
2468int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2469{
2470 int type;
2471 int rem = 0;
2472
da5f6c37 2473 while (count) {
99811510
SS
2474 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2475
2476 if (!skb) {
2477 struct { char type; } *pkt;
2478
2479 /* Start of the frame */
2480 pkt = data;
2481 type = pkt->type;
2482
2483 data++;
2484 count--;
2485 } else
2486 type = bt_cb(skb)->pkt_type;
2487
1e429f38 2488 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2489 STREAM_REASSEMBLY);
99811510
SS
2490 if (rem < 0)
2491 return rem;
2492
2493 data += (count - rem);
2494 count = rem;
f81c6224 2495 }
99811510
SS
2496
2497 return rem;
2498}
2499EXPORT_SYMBOL(hci_recv_stream_fragment);
2500
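/*
 * Usage sketch for UART-style (H4) transports, where each frame is
 * prefixed with a one-byte packet type on the wire. "buf" and "len"
 * are assumed to be the raw bytes just received by the driver:
 *
 *	rem = hci_recv_stream_fragment(hdev, buf, len);
 *	if (rem < 0)
 *		BT_ERR("Frame reassembly failed (%d)", rem);
 */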
1da177e4
LT
2501/* ---- Interface to upper protocols ---- */
2502
1da177e4
LT
2503int hci_register_cb(struct hci_cb *cb)
2504{
2505 BT_DBG("%p name %s", cb, cb->name);
2506
f20d09d5 2507 write_lock(&hci_cb_list_lock);
1da177e4 2508 list_add(&cb->list, &hci_cb_list);
f20d09d5 2509 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2510
2511 return 0;
2512}
2513EXPORT_SYMBOL(hci_register_cb);
2514
2515int hci_unregister_cb(struct hci_cb *cb)
2516{
2517 BT_DBG("%p name %s", cb, cb->name);
2518
f20d09d5 2519 write_lock(&hci_cb_list_lock);
1da177e4 2520 list_del(&cb->list);
f20d09d5 2521 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2522
2523 return 0;
2524}
2525EXPORT_SYMBOL(hci_unregister_cb);
2526
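/*
 * Minimal sketch of a protocol layer hooking into HCI (only .name is
 * shown here; real users also fill in the event callbacks declared in
 * hci_core.h):
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */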
2527static int hci_send_frame(struct sk_buff *skb)
2528{
2529 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2530
2531 if (!hdev) {
2532 kfree_skb(skb);
2533 return -ENODEV;
2534 }
2535
0d48d939 2536 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2537
cd82e61c
MH
2538 /* Time stamp */
2539 __net_timestamp(skb);
1da177e4 2540
cd82e61c
MH
2541 /* Send copy to monitor */
2542 hci_send_to_monitor(hdev, skb);
2543
2544 if (atomic_read(&hdev->promisc)) {
2545 /* Send copy to the sockets */
470fe1b5 2546 hci_send_to_sock(hdev, skb);
1da177e4
LT
2547 }
2548
2549 /* Get rid of skb owner, prior to sending to the driver. */
2550 skb_orphan(skb);
2551
2552 return hdev->send(skb);
2553}
2554
3119ae95
JH
2555void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2556{
2557 skb_queue_head_init(&req->cmd_q);
2558 req->hdev = hdev;
5d73e034 2559 req->err = 0;
3119ae95
JH
2560}
2561
2562int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2563{
2564 struct hci_dev *hdev = req->hdev;
2565 struct sk_buff *skb;
2566 unsigned long flags;
2567
2568 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2569
5d73e034
AG
2570 /* If an error occurred during request building, remove all HCI
2571 * commands queued on the HCI request queue.
2572 */
2573 if (req->err) {
2574 skb_queue_purge(&req->cmd_q);
2575 return req->err;
2576 }
2577
3119ae95
JH
2578 /* Do not allow empty requests */
2579 if (skb_queue_empty(&req->cmd_q))
382b0c39 2580 return -ENODATA;
3119ae95
JH
2581
2582 skb = skb_peek_tail(&req->cmd_q);
2583 bt_cb(skb)->req.complete = complete;
2584
2585 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2586 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2587 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2588
2589 queue_work(hdev->workqueue, &hdev->cmd_work);
2590
2591 return 0;
2592}
2593
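/*
 * Usage sketch for the asynchronous request machinery (my_complete is
 * a hypothetical hci_req_complete_t callback):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 *
 * All queued commands are spliced onto hdev->cmd_q in one go, and
 * my_complete() fires once the last of them completes.
 */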
1ca3a9d0
JH
2594static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2595 u32 plen, void *param)
1da177e4
LT
2596{
2597 int len = HCI_COMMAND_HDR_SIZE + plen;
2598 struct hci_command_hdr *hdr;
2599 struct sk_buff *skb;
2600
1da177e4 2601 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
2602 if (!skb)
2603 return NULL;
1da177e4
LT
2604
2605 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2606 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2607 hdr->plen = plen;
2608
2609 if (plen)
2610 memcpy(skb_put(skb, plen), param, plen);
2611
2612 BT_DBG("skb len %d", skb->len);
2613
0d48d939 2614 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2615 skb->dev = (void *) hdev;
c78ae283 2616
1ca3a9d0
JH
2617 return skb;
2618}
2619
2620/* Send HCI command */
2621int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2622{
2623 struct sk_buff *skb;
2624
2625 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2626
2627 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2628 if (!skb) {
2629 BT_ERR("%s no memory for command", hdev->name);
2630 return -ENOMEM;
2631 }
2632
11714b3d
JH
2633 /* Stand-alone HCI commands must be flagged as
2634 * single-command requests.
2635 */
2636 bt_cb(skb)->req.start = true;
2637
1da177e4 2638 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2639 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2640
2641 return 0;
2642}
1da177e4 2643
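/*
 * Stand-alone command example, reusing a command structure already
 * used in this file (see le_scan_enable_req() above):
 *
 *	struct hci_cp_le_set_scan_enable cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = 1;
 *	err = hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 */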
71c76a17 2644/* Queue a command to an asynchronous HCI request */
02350a72
JH
2645void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
2646 u8 event)
71c76a17
JH
2647{
2648 struct hci_dev *hdev = req->hdev;
2649 struct sk_buff *skb;
2650
2651 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2652
34739c1e
AG
2653 /* If an error occurred during request building, there is no point in
2654 * queueing the HCI command. We can simply return.
2655 */
2656 if (req->err)
2657 return;
2658
71c76a17
JH
2659 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2660 if (!skb) {
5d73e034
AG
2661 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2662 hdev->name, opcode);
2663 req->err = -ENOMEM;
e348fe6b 2664 return;
71c76a17
JH
2665 }
2666
2667 if (skb_queue_empty(&req->cmd_q))
2668 bt_cb(skb)->req.start = true;
2669
02350a72
JH
2670 bt_cb(skb)->req.event = event;
2671
71c76a17 2672 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
2673}
2674
02350a72
JH
2675void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2676{
2677 hci_req_add_ev(req, opcode, plen, param, 0);
2678}
2679
1da177e4 2680/* Get data from the previously sent command */
a9de9248 2681void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2682{
2683 struct hci_command_hdr *hdr;
2684
2685 if (!hdev->sent_cmd)
2686 return NULL;
2687
2688 hdr = (void *) hdev->sent_cmd->data;
2689
a9de9248 2690 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2691 return NULL;
2692
f0e09510 2693 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2694
2695 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2696}
2697
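/*
 * Typical use from an event handler (sketch): recover the parameters
 * of the command that a Command Complete event refers to.
 *
 *	struct hci_cp_le_set_scan_param *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
 *	if (!cp)
 *		return;
 */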
2698/* Send ACL data */
2699static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2700{
2701 struct hci_acl_hdr *hdr;
2702 int len = skb->len;
2703
badff6d0
ACM
2704 skb_push(skb, HCI_ACL_HDR_SIZE);
2705 skb_reset_transport_header(skb);
9c70220b 2706 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2707 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2708 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2709}
2710
ee22be7e 2711static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2712 struct sk_buff *skb, __u16 flags)
1da177e4 2713{
ee22be7e 2714 struct hci_conn *conn = chan->conn;
1da177e4
LT
2715 struct hci_dev *hdev = conn->hdev;
2716 struct sk_buff *list;
2717
087bfd99
GP
2718 skb->len = skb_headlen(skb);
2719 skb->data_len = 0;
2720
2721 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2722
2723 switch (hdev->dev_type) {
2724 case HCI_BREDR:
2725 hci_add_acl_hdr(skb, conn->handle, flags);
2726 break;
2727 case HCI_AMP:
2728 hci_add_acl_hdr(skb, chan->handle, flags);
2729 break;
2730 default:
2731 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2732 return;
2733 }
087bfd99 2734
70f23020
AE
2735 list = skb_shinfo(skb)->frag_list;
2736 if (!list) {
1da177e4
LT
2737 /* Non-fragmented */
2738 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2739
73d80deb 2740 skb_queue_tail(queue, skb);
1da177e4
LT
2741 } else {
2742 /* Fragmented */
2743 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2744
2745 skb_shinfo(skb)->frag_list = NULL;
2746
2747 /* Queue all fragments atomically */
af3e6359 2748 spin_lock(&queue->lock);
1da177e4 2749
73d80deb 2750 __skb_queue_tail(queue, skb);
e702112f
AE
2751
2752 flags &= ~ACL_START;
2753 flags |= ACL_CONT;
1da177e4
LT
2754 do {
2755 skb = list; list = list->next;
8e87d142 2756
1da177e4 2757 skb->dev = (void *) hdev;
0d48d939 2758 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2759 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2760
2761 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2762
73d80deb 2763 __skb_queue_tail(queue, skb);
1da177e4
LT
2764 } while (list);
2765
af3e6359 2766 spin_unlock(&queue->lock);
1da177e4 2767 }
73d80deb
LAD
2768}
2769
2770void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2771{
ee22be7e 2772 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 2773
f0e09510 2774 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb
LAD
2775
2776 skb->dev = (void *) hdev;
73d80deb 2777
ee22be7e 2778 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 2779
3eff45ea 2780 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2781}
1da177e4
LT
2782
2783/* Send SCO data */
0d861d8b 2784void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2785{
2786 struct hci_dev *hdev = conn->hdev;
2787 struct hci_sco_hdr hdr;
2788
2789 BT_DBG("%s len %d", hdev->name, skb->len);
2790
aca3192c 2791 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2792 hdr.dlen = skb->len;
2793
badff6d0
ACM
2794 skb_push(skb, HCI_SCO_HDR_SIZE);
2795 skb_reset_transport_header(skb);
9c70220b 2796 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2797
2798 skb->dev = (void *) hdev;
0d48d939 2799 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2800
1da177e4 2801 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2802 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2803}
1da177e4
LT
2804
2805/* ---- HCI TX task (outgoing data) ---- */
2806
2807/* HCI Connection scheduler */
6039aa73
GP
2808static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2809 int *quote)
1da177e4
LT
2810{
2811 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2812 struct hci_conn *conn = NULL, *c;
abc5de8f 2813 unsigned int num = 0, min = ~0;
1da177e4 2814
8e87d142 2815 /* We don't have to lock device here. Connections are always
1da177e4 2816 * added and removed with TX task disabled. */
bf4c6325
GP
2817
2818 rcu_read_lock();
2819
2820 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2821 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2822 continue;
769be974
MH
2823
2824 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2825 continue;
2826
1da177e4
LT
2827 num++;
2828
2829 if (c->sent < min) {
2830 min = c->sent;
2831 conn = c;
2832 }
52087a79
LAD
2833
2834 if (hci_conn_num(hdev, type) == num)
2835 break;
1da177e4
LT
2836 }
2837
bf4c6325
GP
2838 rcu_read_unlock();
2839
1da177e4 2840 if (conn) {
6ed58ec5
VT
2841 int cnt, q;
2842
2843 switch (conn->type) {
2844 case ACL_LINK:
2845 cnt = hdev->acl_cnt;
2846 break;
2847 case SCO_LINK:
2848 case ESCO_LINK:
2849 cnt = hdev->sco_cnt;
2850 break;
2851 case LE_LINK:
2852 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2853 break;
2854 default:
2855 cnt = 0;
2856 BT_ERR("Unknown link type");
2857 }
2858
2859 q = cnt / num;
1da177e4
LT
2860 *quote = q ? q : 1;
2861 } else
2862 *quote = 0;
2863
2864 BT_DBG("conn %p quote %d", conn, *quote);
2865 return conn;
2866}
2867
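/*
 * Quota arithmetic above, with concrete (assumed) numbers: with
 * hdev->acl_cnt == 10 free ACL buffers shared by num == 3 busy ACL
 * connections, a scheduling round grants quote = 10 / 3 = 3 packets
 * to the least-served connection; a zero result is rounded up to 1
 * so that forward progress is always possible.
 */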
6039aa73 2868static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2869{
2870 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2871 struct hci_conn *c;
1da177e4 2872
bae1f5d9 2873 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2874
bf4c6325
GP
2875 rcu_read_lock();
2876
1da177e4 2877 /* Kill stalled connections */
bf4c6325 2878 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2879 if (c->type == type && c->sent) {
6ed93dc6
AE
2880 BT_ERR("%s killing stalled connection %pMR",
2881 hdev->name, &c->dst);
bed71748 2882 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2883 }
2884 }
bf4c6325
GP
2885
2886 rcu_read_unlock();
1da177e4
LT
2887}
2888
6039aa73
GP
2889static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2890 int *quote)
1da177e4 2891{
73d80deb
LAD
2892 struct hci_conn_hash *h = &hdev->conn_hash;
2893 struct hci_chan *chan = NULL;
abc5de8f 2894 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2895 struct hci_conn *conn;
73d80deb
LAD
2896 int cnt, q, conn_num = 0;
2897
2898 BT_DBG("%s", hdev->name);
2899
bf4c6325
GP
2900 rcu_read_lock();
2901
2902 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2903 struct hci_chan *tmp;
2904
2905 if (conn->type != type)
2906 continue;
2907
2908 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2909 continue;
2910
2911 conn_num++;
2912
8192edef 2913 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2914 struct sk_buff *skb;
2915
2916 if (skb_queue_empty(&tmp->data_q))
2917 continue;
2918
2919 skb = skb_peek(&tmp->data_q);
2920 if (skb->priority < cur_prio)
2921 continue;
2922
2923 if (skb->priority > cur_prio) {
2924 num = 0;
2925 min = ~0;
2926 cur_prio = skb->priority;
2927 }
2928
2929 num++;
2930
2931 if (conn->sent < min) {
2932 min = conn->sent;
2933 chan = tmp;
2934 }
2935 }
2936
2937 if (hci_conn_num(hdev, type) == conn_num)
2938 break;
2939 }
2940
bf4c6325
GP
2941 rcu_read_unlock();
2942
73d80deb
LAD
2943 if (!chan)
2944 return NULL;
2945
2946 switch (chan->conn->type) {
2947 case ACL_LINK:
2948 cnt = hdev->acl_cnt;
2949 break;
bd1eb66b
AE
2950 case AMP_LINK:
2951 cnt = hdev->block_cnt;
2952 break;
73d80deb
LAD
2953 case SCO_LINK:
2954 case ESCO_LINK:
2955 cnt = hdev->sco_cnt;
2956 break;
2957 case LE_LINK:
2958 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2959 break;
2960 default:
2961 cnt = 0;
2962 BT_ERR("Unknown link type");
2963 }
2964
2965 q = cnt / num;
2966 *quote = q ? q : 1;
2967 BT_DBG("chan %p quote %d", chan, *quote);
2968 return chan;
2969}
2970
02b20f0b
LAD
2971static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2972{
2973 struct hci_conn_hash *h = &hdev->conn_hash;
2974 struct hci_conn *conn;
2975 int num = 0;
2976
2977 BT_DBG("%s", hdev->name);
2978
bf4c6325
GP
2979 rcu_read_lock();
2980
2981 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2982 struct hci_chan *chan;
2983
2984 if (conn->type != type)
2985 continue;
2986
2987 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2988 continue;
2989
2990 num++;
2991
8192edef 2992 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2993 struct sk_buff *skb;
2994
2995 if (chan->sent) {
2996 chan->sent = 0;
2997 continue;
2998 }
2999
3000 if (skb_queue_empty(&chan->data_q))
3001 continue;
3002
3003 skb = skb_peek(&chan->data_q);
3004 if (skb->priority >= HCI_PRIO_MAX - 1)
3005 continue;
3006
3007 skb->priority = HCI_PRIO_MAX - 1;
3008
3009 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3010 skb->priority);
02b20f0b
LAD
3011 }
3012
3013 if (hci_conn_num(hdev, type) == num)
3014 break;
3015 }
bf4c6325
GP
3016
3017 rcu_read_unlock();
3018
02b20f0b
LAD
3019}
3020
b71d385a
AE
3021static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3022{
3023 /* Calculate count of blocks used by this packet */
3024 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3025}
3026
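/*
 * Worked example for __get_blocks() (the block_len value is assumed):
 * with hdev->block_len == 256 and an skb of 1028 bytes including the
 * 4-byte ACL header, DIV_ROUND_UP(1024, 256) charges 4 data blocks
 * against hdev->block_cnt.
 */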
6039aa73 3027static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3028{
1da177e4
LT
3029 if (!test_bit(HCI_RAW, &hdev->flags)) {
3030 /* ACL tx timeout must be longer than maximum
3031 * link supervision timeout (40.9 seconds) */
63d2bc1b 3032 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3033 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3034 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3035 }
63d2bc1b 3036}
1da177e4 3037
6039aa73 3038static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3039{
3040 unsigned int cnt = hdev->acl_cnt;
3041 struct hci_chan *chan;
3042 struct sk_buff *skb;
3043 int quote;
3044
3045 __check_timeout(hdev, cnt);
04837f64 3046
73d80deb 3047 while (hdev->acl_cnt &&
a8c5fb1a 3048 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3049 u32 priority = (skb_peek(&chan->data_q))->priority;
3050 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3051 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3052 skb->len, skb->priority);
73d80deb 3053
ec1cce24
LAD
3054 /* Stop if priority has changed */
3055 if (skb->priority < priority)
3056 break;
3057
3058 skb = skb_dequeue(&chan->data_q);
3059
73d80deb 3060 hci_conn_enter_active_mode(chan->conn,
04124681 3061 bt_cb(skb)->force_active);
04837f64 3062
1da177e4
LT
3063 hci_send_frame(skb);
3064 hdev->acl_last_tx = jiffies;
3065
3066 hdev->acl_cnt--;
73d80deb
LAD
3067 chan->sent++;
3068 chan->conn->sent++;
1da177e4
LT
3069 }
3070 }
02b20f0b
LAD
3071
3072 if (cnt != hdev->acl_cnt)
3073 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3074}
3075
6039aa73 3076static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3077{
63d2bc1b 3078 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3079 struct hci_chan *chan;
3080 struct sk_buff *skb;
3081 int quote;
bd1eb66b 3082 u8 type;
b71d385a 3083
63d2bc1b 3084 __check_timeout(hdev, cnt);
b71d385a 3085
bd1eb66b
AE
3086 BT_DBG("%s", hdev->name);
3087
3088 if (hdev->dev_type == HCI_AMP)
3089 type = AMP_LINK;
3090 else
3091 type = ACL_LINK;
3092
b71d385a 3093 while (hdev->block_cnt > 0 &&
bd1eb66b 3094 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3095 u32 priority = (skb_peek(&chan->data_q))->priority;
3096 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3097 int blocks;
3098
3099 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3100 skb->len, skb->priority);
b71d385a
AE
3101
3102 /* Stop if priority has changed */
3103 if (skb->priority < priority)
3104 break;
3105
3106 skb = skb_dequeue(&chan->data_q);
3107
3108 blocks = __get_blocks(hdev, skb);
3109 if (blocks > hdev->block_cnt)
3110 return;
3111
3112 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3113 bt_cb(skb)->force_active);
b71d385a
AE
3114
3115 hci_send_frame(skb);
3116 hdev->acl_last_tx = jiffies;
3117
3118 hdev->block_cnt -= blocks;
3119 quote -= blocks;
3120
3121 chan->sent += blocks;
3122 chan->conn->sent += blocks;
3123 }
3124 }
3125
3126 if (cnt != hdev->block_cnt)
bd1eb66b 3127 hci_prio_recalculate(hdev, type);
b71d385a
AE
3128}
3129
6039aa73 3130static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3131{
3132 BT_DBG("%s", hdev->name);
3133
bd1eb66b
AE
3134 /* No ACL link over BR/EDR controller */
3135 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3136 return;
3137
3138 /* No AMP link over AMP controller */
3139 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3140 return;
3141
3142 switch (hdev->flow_ctl_mode) {
3143 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3144 hci_sched_acl_pkt(hdev);
3145 break;
3146
3147 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3148 hci_sched_acl_blk(hdev);
3149 break;
3150 }
3151}
3152
1da177e4 3153/* Schedule SCO */
6039aa73 3154static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3155{
3156 struct hci_conn *conn;
3157 struct sk_buff *skb;
3158 int quote;
3159
3160 BT_DBG("%s", hdev->name);
3161
52087a79
LAD
3162 if (!hci_conn_num(hdev, SCO_LINK))
3163 return;
3164
1da177e4
LT
3165 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3166 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3167 BT_DBG("skb %p len %d", skb, skb->len);
3168 hci_send_frame(skb);
3169
3170 conn->sent++;
3171 if (conn->sent == ~0)
3172 conn->sent = 0;
3173 }
3174 }
3175}
3176
6039aa73 3177static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3178{
3179 struct hci_conn *conn;
3180 struct sk_buff *skb;
3181 int quote;
3182
3183 BT_DBG("%s", hdev->name);
3184
52087a79
LAD
3185 if (!hci_conn_num(hdev, ESCO_LINK))
3186 return;
3187
8fc9ced3
GP
3188 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3189 &quote))) {
b6a0dc82
MH
3190 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3191 BT_DBG("skb %p len %d", skb, skb->len);
3192 hci_send_frame(skb);
3193
3194 conn->sent++;
3195 if (conn->sent == ~0)
3196 conn->sent = 0;
3197 }
3198 }
3199}
3200
6039aa73 3201static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3202{
73d80deb 3203 struct hci_chan *chan;
6ed58ec5 3204 struct sk_buff *skb;
02b20f0b 3205 int quote, cnt, tmp;
6ed58ec5
VT
3206
3207 BT_DBG("%s", hdev->name);
3208
52087a79
LAD
3209 if (!hci_conn_num(hdev, LE_LINK))
3210 return;
3211
6ed58ec5
VT
3212 if (!test_bit(HCI_RAW, &hdev->flags)) {
3213 /* LE tx timeout must be longer than maximum
3214 * link supervision timeout (40.9 seconds) */
bae1f5d9 3215 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3216 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3217 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3218 }
3219
3220 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3221 tmp = cnt;
73d80deb 3222 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3223 u32 priority = (skb_peek(&chan->data_q))->priority;
3224 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3225 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3226 skb->len, skb->priority);
6ed58ec5 3227
ec1cce24
LAD
3228 /* Stop if priority has changed */
3229 if (skb->priority < priority)
3230 break;
3231
3232 skb = skb_dequeue(&chan->data_q);
3233
6ed58ec5
VT
3234 hci_send_frame(skb);
3235 hdev->le_last_tx = jiffies;
3236
3237 cnt--;
73d80deb
LAD
3238 chan->sent++;
3239 chan->conn->sent++;
6ed58ec5
VT
3240 }
3241 }
73d80deb 3242
6ed58ec5
VT
3243 if (hdev->le_pkts)
3244 hdev->le_cnt = cnt;
3245 else
3246 hdev->acl_cnt = cnt;
02b20f0b
LAD
3247
3248 if (cnt != tmp)
3249 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3250}
3251
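/*
 * Note on LE flow control above: controllers that report no dedicated
 * LE buffers (hdev->le_pkts == 0) share the ACL credit pool, which is
 * why the leftover count is written back to either le_cnt or acl_cnt.
 */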
3eff45ea 3252static void hci_tx_work(struct work_struct *work)
1da177e4 3253{
3eff45ea 3254 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3255 struct sk_buff *skb;
3256
6ed58ec5 3257 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3258 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
3259
3260 /* Schedule queues and send stuff to HCI driver */
3261
3262 hci_sched_acl(hdev);
3263
3264 hci_sched_sco(hdev);
3265
b6a0dc82
MH
3266 hci_sched_esco(hdev);
3267
6ed58ec5
VT
3268 hci_sched_le(hdev);
3269
1da177e4
LT
3270 /* Send next queued raw (unknown type) packet */
3271 while ((skb = skb_dequeue(&hdev->raw_q)))
3272 hci_send_frame(skb);
1da177e4
LT
3273}
3274
25985edc 3275/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3276
3277/* ACL data packet */
6039aa73 3278static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3279{
3280 struct hci_acl_hdr *hdr = (void *) skb->data;
3281 struct hci_conn *conn;
3282 __u16 handle, flags;
3283
3284 skb_pull(skb, HCI_ACL_HDR_SIZE);
3285
3286 handle = __le16_to_cpu(hdr->handle);
3287 flags = hci_flags(handle);
3288 handle = hci_handle(handle);
3289
f0e09510 3290 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3291 handle, flags);
1da177e4
LT
3292
3293 hdev->stat.acl_rx++;
3294
3295 hci_dev_lock(hdev);
3296 conn = hci_conn_hash_lookup_handle(hdev, handle);
3297 hci_dev_unlock(hdev);
8e87d142 3298
1da177e4 3299 if (conn) {
65983fc7 3300 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3301
1da177e4 3302 /* Send to upper protocol */
686ebf28
UF
3303 l2cap_recv_acldata(conn, skb, flags);
3304 return;
1da177e4 3305 } else {
8e87d142 3306 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3307 hdev->name, handle);
1da177e4
LT
3308 }
3309
3310 kfree_skb(skb);
3311}
3312
3313/* SCO data packet */
6039aa73 3314static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3315{
3316 struct hci_sco_hdr *hdr = (void *) skb->data;
3317 struct hci_conn *conn;
3318 __u16 handle;
3319
3320 skb_pull(skb, HCI_SCO_HDR_SIZE);
3321
3322 handle = __le16_to_cpu(hdr->handle);
3323
f0e09510 3324 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3325
3326 hdev->stat.sco_rx++;
3327
3328 hci_dev_lock(hdev);
3329 conn = hci_conn_hash_lookup_handle(hdev, handle);
3330 hci_dev_unlock(hdev);
3331
3332 if (conn) {
1da177e4 3333 /* Send to upper protocol */
686ebf28
UF
3334 sco_recv_scodata(conn, skb);
3335 return;
1da177e4 3336 } else {
8e87d142 3337 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3338 hdev->name, handle);
1da177e4
LT
3339 }
3340
3341 kfree_skb(skb);
3342}
3343
9238f36a
JH
3344static bool hci_req_is_complete(struct hci_dev *hdev)
3345{
3346 struct sk_buff *skb;
3347
3348 skb = skb_peek(&hdev->cmd_q);
3349 if (!skb)
3350 return true;
3351
3352 return bt_cb(skb)->req.start;
3353}
3354
42c6b129
JH
3355static void hci_resend_last(struct hci_dev *hdev)
3356{
3357 struct hci_command_hdr *sent;
3358 struct sk_buff *skb;
3359 u16 opcode;
3360
3361 if (!hdev->sent_cmd)
3362 return;
3363
3364 sent = (void *) hdev->sent_cmd->data;
3365 opcode = __le16_to_cpu(sent->opcode);
3366 if (opcode == HCI_OP_RESET)
3367 return;
3368
3369 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3370 if (!skb)
3371 return;
3372
3373 skb_queue_head(&hdev->cmd_q, skb);
3374 queue_work(hdev->workqueue, &hdev->cmd_work);
3375}
3376
9238f36a
JH
3377void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3378{
3379 hci_req_complete_t req_complete = NULL;
3380 struct sk_buff *skb;
3381 unsigned long flags;
3382
3383 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3384
42c6b129
JH
3385 /* If the completed command doesn't match the last one that was
3386 * sent we need to do special handling of it.
9238f36a 3387 */
42c6b129
JH
3388 if (!hci_sent_cmd_data(hdev, opcode)) {
3389 /* Some CSR-based controllers generate a spontaneous
3390 * reset complete event during init and any pending
3391 * command will never be completed. In such a case we
3392 * need to resend whatever was the last sent
3393 * command.
3394 */
3395 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3396 hci_resend_last(hdev);
3397
9238f36a 3398 return;
42c6b129 3399 }
9238f36a
JH
3400
3401 /* If the command succeeded and there's still more commands in
3402 * this request the request is not yet complete.
3403 */
3404 if (!status && !hci_req_is_complete(hdev))
3405 return;
3406
3407 /* If this was the last command in a request the complete
3408 * callback would be found in hdev->sent_cmd instead of the
3409 * command queue (hdev->cmd_q).
3410 */
3411 if (hdev->sent_cmd) {
3412 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3413 if (req_complete)
3414 goto call_complete;
3415 }
3416
3417 /* Remove all pending commands belonging to this request */
3418 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3419 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3420 if (bt_cb(skb)->req.start) {
3421 __skb_queue_head(&hdev->cmd_q, skb);
3422 break;
3423 }
3424
3425 req_complete = bt_cb(skb)->req.complete;
3426 kfree_skb(skb);
3427 }
3428 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3429
3430call_complete:
3431 if (req_complete)
3432 req_complete(hdev, status);
3433}
3434
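/*
 * Summary of the completion path above: the event handlers invoke
 * hci_req_cmd_complete() for Command Complete/Status events. The
 * request's complete callback is resolved either from hdev->sent_cmd
 * (the last command of the request) or, when a command failed midway,
 * by draining the remaining commands of that request from hdev->cmd_q.
 */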
b78752cc 3435static void hci_rx_work(struct work_struct *work)
1da177e4 3436{
b78752cc 3437 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3438 struct sk_buff *skb;
3439
3440 BT_DBG("%s", hdev->name);
3441
1da177e4 3442 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3443 /* Send copy to monitor */
3444 hci_send_to_monitor(hdev, skb);
3445
1da177e4
LT
3446 if (atomic_read(&hdev->promisc)) {
3447 /* Send copy to the sockets */
470fe1b5 3448 hci_send_to_sock(hdev, skb);
1da177e4
LT
3449 }
3450
3451 if (test_bit(HCI_RAW, &hdev->flags)) {
3452 kfree_skb(skb);
3453 continue;
3454 }
3455
3456 if (test_bit(HCI_INIT, &hdev->flags)) {
3457 /* Don't process data packets in these states. */
0d48d939 3458 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3459 case HCI_ACLDATA_PKT:
3460 case HCI_SCODATA_PKT:
3461 kfree_skb(skb);
3462 continue;
3ff50b79 3463 }
1da177e4
LT
3464 }
3465
3466 /* Process frame */
0d48d939 3467 switch (bt_cb(skb)->pkt_type) {
1da177e4 3468 case HCI_EVENT_PKT:
b78752cc 3469 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3470 hci_event_packet(hdev, skb);
3471 break;
3472
3473 case HCI_ACLDATA_PKT:
3474 BT_DBG("%s ACL data packet", hdev->name);
3475 hci_acldata_packet(hdev, skb);
3476 break;
3477
3478 case HCI_SCODATA_PKT:
3479 BT_DBG("%s SCO data packet", hdev->name);
3480 hci_scodata_packet(hdev, skb);
3481 break;
3482
3483 default:
3484 kfree_skb(skb);
3485 break;
3486 }
3487 }
1da177e4
LT
3488}
3489
c347b765 3490static void hci_cmd_work(struct work_struct *work)
1da177e4 3491{
c347b765 3492 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3493 struct sk_buff *skb;
3494
2104786b
AE
3495 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3496 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3497
1da177e4 3498 /* Send queued commands */
5a08ecce
AE
3499 if (atomic_read(&hdev->cmd_cnt)) {
3500 skb = skb_dequeue(&hdev->cmd_q);
3501 if (!skb)
3502 return;
3503
7585b97a 3504 kfree_skb(hdev->sent_cmd);
1da177e4 3505
70f23020
AE
3506 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3507 if (hdev->sent_cmd) {
1da177e4
LT
3508 atomic_dec(&hdev->cmd_cnt);
3509 hci_send_frame(skb);
7bdb8a5c
SJ
3510 if (test_bit(HCI_RESET, &hdev->flags))
3511 del_timer(&hdev->cmd_timer);
3512 else
3513 mod_timer(&hdev->cmd_timer,
5f246e89 3514 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3515 } else {
3516 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3517 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3518 }
3519 }
3520}
2519a1fc
AG
3521
3522int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3523{
3524 /* General inquiry access code (GIAC) */
3525 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3526 struct hci_cp_inquiry cp;
3527
3528 BT_DBG("%s", hdev->name);
3529
3530 if (test_bit(HCI_INQUIRY, &hdev->flags))
3531 return -EINPROGRESS;
3532
4663262c
JH
3533 inquiry_cache_flush(hdev);
3534
2519a1fc
AG
3535 memset(&cp, 0, sizeof(cp));
3536 memcpy(&cp.lap, lap, sizeof(cp.lap));
3537 cp.length = length;
3538
3539 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3540}
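/*
 * Sketch: "length" is in standard inquiry-length units of 1.28 s, so
 * the call below (value assumed) runs a general inquiry for ~10.24 s:
 *
 *	err = hci_do_inquiry(hdev, 0x08);
 */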
023d5049
AG
3541
3542int hci_cancel_inquiry(struct hci_dev *hdev)
3543{
3544 BT_DBG("%s", hdev->name);
3545
3546 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 3547 return -EALREADY;
023d5049
AG
3548
3549 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3550}
31f7956c
AG
3551
3552u8 bdaddr_to_le(u8 bdaddr_type)
3553{
3554 switch (bdaddr_type) {
3555 case BDADDR_LE_PUBLIC:
3556 return ADDR_LE_DEV_PUBLIC;
3557
3558 default:
3559 /* Fallback to LE Random address type */
3560 return ADDR_LE_DEV_RANDOM;
3561 }
3562}
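/*
 * Usage sketch: map an mgmt-facing address type to the HCI LE address
 * type used on the wire (BDADDR_LE_RANDOM assumed available here):
 *
 *	u8 addr_type = bdaddr_to_le(BDADDR_LE_RANDOM);
 *
 * which yields ADDR_LE_DEV_RANDOM.
 */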