/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */
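/* Synchronous requests: __hci_request() calls a request function that
 * queues one or more HCI commands and then sleeps on req_wait_q until
 * the event handler reports completion via hci_req_complete(), the
 * request is cancelled, or the timeout expires. */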

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
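/* Must be called with hdev->req_lock held; hci_request() below takes it. */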
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;
                skb_queue_tail(&hdev->cmd_q, skb);
                hci_sched_cmd(hdev);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = __cpu_to_le16(0xffff);
                cp.sco_max_pkt = __cpu_to_le16(0xffff);
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        {
                struct hci_cp_set_event_flt cp;
                cp.flt_type = HCI_FLT_CLEAR_ALL;
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
        }

        /* Page timeout ~20 secs */
        param = __cpu_to_le16(0x8000);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = __cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
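/* The inquiry cache is a singly-linked list of the most recent inquiry
 * responses; it is flushed when it grows stale (see hci_inquiry()). */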
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                memset(e, 0, sizeof(struct inquiry_entry));
                e->next = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                        inquiry_cache_empty(hdev) ||
                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

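        /* The inquiry length is in units of 1.28 seconds, so give the
         * request roughly twice that long to complete. */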
        timeo = ir.length * 2 * HZ;
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
         * and then copy it to user space.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0, HZ/4);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req,
                                        dr.dev_opt, HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req,
                                dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETLINKPOL:
                hdev->link_policy = (__u16) dr.dev_opt;
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

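        /* For the two MTU ioctls, dev_opt packs two 16-bit values: the
         * first __u16 in memory is the packet count, the second is the
         * MTU (note that this layout depends on host byte order). */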
        case HCISETACLMTU:
                hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr = hdev->bdaddr;
        di.type = hdev->type;
        di.flags = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        memset(hdev, 0, sizeof(struct hci_dev));

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via class release */
        class_device_put(&hdev->class_dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
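        /* hci_dev_list is kept sorted by id, so the first gap is the
         * lowest free id and head points at the entry to insert after. */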
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);
        init_MUTEX(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        hci_unregister_sysfs(hdev);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        hci_notify(hdev, HCI_DEV_UNREG);

        __hci_dev_put(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
        hdr->plen = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
        skb_queue_tail(&hdev->cmd_q, skb);
        hci_sched_cmd(hdev);

        return 0;
}
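
/* Usage sketch: a one-byte parameter is passed by address, e.g.
 *
 *      __u8 scan = SCAN_INQUIRY | SCAN_PAGE;
 *      hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The command is only queued here; hci_cmd_task() actually sends it once
 * the controller is ready for another command.
 */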

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
                return NULL;

        BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
        hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen = __cpu_to_le16(len);

        skb->h.raw = (void *) hdr;
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

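        /* The head skb carries ACL_START; any fragments hanging off its
         * frag_list are re-tagged ACL_CONT and queued atomically so that
         * the controller sees them in order. */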
        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non-fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = __cpu_to_le16(conn->handle);
        hdr.dlen = skb->len;

        skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
        memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
        skb_queue_tail(&conn->data_q, skb);
        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
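/* Pick the connection of the requested type with the fewest packets
 * already in flight and grant it a fair share (*quote) of the free
 * controller buffers. */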
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock the device here. Connections are always
         * added and removed with the TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || c->state != BT_CONNECTED
                                || skb_queue_empty(&c->data_q))
                        continue;
                num++;

                if (c->sent < min) {
                        min = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
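                        /* 0x13: Remote User Terminated Connection */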
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

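        /* cmd_cnt is the number of commands the controller is currently
         * willing to accept: it is decremented when a command is sent and
         * set again by the event handler once the controller reports that
         * it is ready for more. */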
        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                if (hdev->sent_cmd)
                        kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        hci_sched_cmd(hdev);
                }
        }
}