net: return operator cleanup
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / include / net / bluetooth / hci_core.h
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 #ifndef __HCI_CORE_H
26 #define __HCI_CORE_H
27
28 #include <net/bluetooth/hci.h>
29
30 /* HCI upper protocols */
31 #define HCI_PROTO_L2CAP 0
32 #define HCI_PROTO_SCO 1
33
34 /* HCI Core structures */
/* One discovered remote device, as reported by an HCI Inquiry Result
 * (plain, with-RSSI, or extended) event. */
struct inquiry_data {
        bdaddr_t bdaddr;                /* remote device address */
        __u8 pscan_rep_mode;            /* page scan repetition mode */
        __u8 pscan_period_mode;         /* page scan period mode */
        __u8 pscan_mode;                /* legacy page scan mode */
        __u8 dev_class[3];              /* class of device */
        __le16 clock_offset;            /* clock offset, little endian as on the wire */
        __s8 rssi;                      /* signal strength (dBm); 0 when not reported — TODO confirm */
        __u8 ssp_mode;                  /* Simple Pairing support flag */
};
45
/* Node of the singly-linked inquiry cache; timestamp (jiffies) records
 * when the entry was last updated (see inquiry_entry_age()). */
struct inquiry_entry {
        struct inquiry_entry *next;
        __u32 timestamp;
        struct inquiry_data data;
};
51
/* Per-controller cache of recent inquiry results, guarded by lock
 * (inquiry_cache_lock* macros below); timestamp is the jiffies time of
 * the last refresh (see inquiry_cache_age()). */
struct inquiry_cache {
        spinlock_t lock;
        __u32 timestamp;
        struct inquiry_entry *list;     /* singly-linked list of entries, NULL when empty */
};
57
/* Set of active connections for one controller.  Despite the name it is
 * a plain linked list, with per-link-type counters kept alongside. */
struct hci_conn_hash {
        struct list_head list;          /* all hci_conn objects of this device */
        spinlock_t lock;
        unsigned int acl_num;           /* number of ACL connections */
        unsigned int sco_num;           /* number of SCO/eSCO connections */
};
64
/* List node holding a single Bluetooth address; used for the per-device
 * blacklist (see hci_blacklist_lookup()/hci_blacklist_clear()). */
struct bdaddr_list {
        struct list_head list;
        bdaddr_t bdaddr;
};
#define NUM_REASSEMBLY 4

/*
 * Per-controller state for a registered HCI device (one per adapter).
 * Lifetime is reference counted via refcnt (__hci_dev_hold/__hci_dev_put);
 * the driver's destruct() callback runs when the last reference drops.
 */
struct hci_dev {
        struct list_head list;          /* node in the global hci_dev_list */
        spinlock_t lock;                /* protects device state (hci_dev_lock* macros) */
        atomic_t refcnt;                /* core reference count */

        char name[8];                   /* device name, e.g. "hci0" */
        unsigned long flags;            /* HCI_* run-time flag bits */
        __u16 id;                       /* device index */
        __u8 bus;                       /* transport bus type */
        __u8 dev_type;
        bdaddr_t bdaddr;                /* controller's own Bluetooth address */
        __u8 dev_name[248];             /* local friendly name (remote-visible) */
        __u8 dev_class[3];              /* class of device */
        __u8 features[8];               /* LMP feature mask (see lmp_*_capable macros) */
        __u8 commands[64];              /* supported-commands bitmap from the controller */
        __u8 ssp_mode;                  /* Simple Pairing mode */
        __u8 hci_ver;
        __u16 hci_rev;
        __u16 manufacturer;
        __u16 voice_setting;

        __u16 pkt_type;                 /* allowed ACL/SCO packet types */
        __u16 esco_type;                /* allowed eSCO packet types */
        __u16 link_policy;
        __u16 link_mode;

        __u32 idle_timeout;
        __u16 sniff_min_interval;
        __u16 sniff_max_interval;

        unsigned long quirks;           /* driver-specific behavior deviations */

        /* flow control: remaining credits towards the controller */
        atomic_t cmd_cnt;
        unsigned int acl_cnt;
        unsigned int sco_cnt;

        /* buffer sizes/counts reported by the controller */
        unsigned int acl_mtu;
        unsigned int sco_mtu;
        unsigned int acl_pkts;
        unsigned int sco_pkts;

        /* jiffies timestamp of the last transmission per queue */
        unsigned long cmd_last_tx;
        unsigned long acl_last_tx;
        unsigned long sco_last_tx;

        struct workqueue_struct *workqueue;

        /* deferred (softirq) processing contexts for cmd/RX/TX paths */
        struct tasklet_struct cmd_task;
        struct tasklet_struct rx_task;
        struct tasklet_struct tx_task;

        struct sk_buff_head rx_q;       /* received packets */
        struct sk_buff_head raw_q;      /* raw packets queued by sockets */
        struct sk_buff_head cmd_q;      /* HCI commands awaiting credits */

        struct sk_buff *sent_cmd;       /* last command sent; read by hci_sent_cmd_data() */
        struct sk_buff *reassembly[NUM_REASSEMBLY];     /* fragment reassembly buffers,
                                                         * presumably indexed by packet type
                                                         * — see hci_recv_fragment() */

        /* synchronous HCI request machinery (hci_req_lock/hci_req_unlock) */
        struct mutex req_lock;
        wait_queue_head_t req_wait_q;
        __u32 req_status;               /* HCI_REQ_DONE/PEND/CANCELED */
        __u32 req_result;

        struct inquiry_cache inq_cache; /* cache of recent inquiry results */
        struct hci_conn_hash conn_hash; /* active connections */
        struct list_head blacklist;     /* rejected addresses (struct bdaddr_list) */

        struct hci_dev_stats stat;

        struct sk_buff_head driver_init;        /* driver-queued init-time commands */

        void *driver_data;              /* transport driver private data */
        void *core_data;

        atomic_t promisc;               /* count of raw-socket listeners attached */

        struct dentry *debugfs;

        struct device *parent;          /* set via SET_HCIDEV_DEV() */
        struct device dev;

        struct rfkill *rfkill;

        struct module *owner;           /* driver module, pinned by hci_dev_hold() */

        /* transport driver callbacks */
        int (*open)(struct hci_dev *hdev);
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
        int (*send)(struct sk_buff *skb);
        void (*destruct)(struct hci_dev *hdev);
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
        int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
163
/*
 * State of a single ACL or SCO/eSCO link to a remote device.
 * Reference counted via refcnt (hci_conn_hold/hci_conn_put): when the
 * count reaches zero the disconnect timer is armed instead of freeing
 * the connection immediately.
 */
struct hci_conn {
        struct list_head list;          /* node in hdev->conn_hash.list */

        atomic_t refcnt;
        spinlock_t lock;

        bdaddr_t dst;                   /* remote device address */
        __u16 handle;                   /* connection handle assigned by the controller */
        __u16 state;                    /* BT_* connection state (e.g. BT_CONNECTED) */
        __u8 mode;
        __u8 type;                      /* link type; ACL_LINK vs. everything else in the hash counters */
        __u8 out;                       /* non-zero if locally initiated (outgoing) */
        __u8 attempt;
        __u8 dev_class[3];
        __u8 features[8];               /* remote LMP features */
        __u8 ssp_mode;
        __u16 interval;
        __u16 pkt_type;
        __u16 link_policy;
        __u32 link_mode;                /* HCI_LM_* bits (e.g. HCI_LM_ENCRYPT) */
        __u8 auth_type;
        __u8 sec_level;                 /* BT_SECURITY_* level */
        __u8 power_save;
        __u16 disc_timeout;             /* disconnect grace period (ms), used by hci_conn_put() */
        unsigned long pend;             /* HCI_CONN_*_PEND bits (enum below) */

        unsigned int sent;              /* presumably packets sent and not yet acked — TODO confirm */

        struct sk_buff_head data_q;     /* outbound data queue */

        struct timer_list disc_timer;   /* deferred disconnect, armed in hci_conn_put() */
        struct timer_list idle_timer;   /* ACL idle detection, cancelled in hci_conn_put() */

        /* sysfs add/del performed from workqueue context */
        struct work_struct work_add;
        struct work_struct work_del;

        struct device dev;
        atomic_t devref;                /* device refs (hci_conn_hold_device/hci_conn_put_device) */

        struct hci_dev *hdev;           /* owning controller */
        void *l2cap_data;               /* per-protocol private data */
        void *sco_data;
        void *priv;

        struct hci_conn *link;          /* related link, e.g. a SCO's parent ACL — TODO confirm */
};
210
211 extern struct hci_proto *hci_proto[];
212 extern struct list_head hci_dev_list;
213 extern struct list_head hci_cb_list;
214 extern rwlock_t hci_dev_list_lock;
215 extern rwlock_t hci_cb_list_lock;
216
217 /* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30)  /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX (HZ*60)  /* 60 seconds */
220
221 #define inquiry_cache_lock(c) spin_lock(&c->lock)
222 #define inquiry_cache_unlock(c) spin_unlock(&c->lock)
223 #define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
224 #define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
225
226 static inline void inquiry_cache_init(struct hci_dev *hdev)
227 {
228 struct inquiry_cache *c = &hdev->inq_cache;
229 spin_lock_init(&c->lock);
230 c->list = NULL;
231 }
232
233 static inline int inquiry_cache_empty(struct hci_dev *hdev)
234 {
235 struct inquiry_cache *c = &hdev->inq_cache;
236 return c->list == NULL;
237 }
238
239 static inline long inquiry_cache_age(struct hci_dev *hdev)
240 {
241 struct inquiry_cache *c = &hdev->inq_cache;
242 return jiffies - c->timestamp;
243 }
244
245 static inline long inquiry_entry_age(struct inquiry_entry *e)
246 {
247 return jiffies - e->timestamp;
248 }
249
250 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
251 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
252
253 /* ----- HCI Connections ----- */
254 enum {
255 HCI_CONN_AUTH_PEND,
256 HCI_CONN_ENCRYPT_PEND,
257 HCI_CONN_RSWITCH_PEND,
258 HCI_CONN_MODE_CHANGE_PEND,
259 HCI_CONN_SCO_SETUP_PEND,
260 };
261
262 static inline void hci_conn_hash_init(struct hci_dev *hdev)
263 {
264 struct hci_conn_hash *h = &hdev->conn_hash;
265 INIT_LIST_HEAD(&h->list);
266 spin_lock_init(&h->lock);
267 h->acl_num = 0;
268 h->sco_num = 0;
269 }
270
271 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
272 {
273 struct hci_conn_hash *h = &hdev->conn_hash;
274 list_add(&c->list, &h->list);
275 if (c->type == ACL_LINK)
276 h->acl_num++;
277 else
278 h->sco_num++;
279 }
280
281 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
282 {
283 struct hci_conn_hash *h = &hdev->conn_hash;
284 list_del(&c->list);
285 if (c->type == ACL_LINK)
286 h->acl_num--;
287 else
288 h->sco_num--;
289 }
290
291 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
292 __u16 handle)
293 {
294 struct hci_conn_hash *h = &hdev->conn_hash;
295 struct list_head *p;
296 struct hci_conn *c;
297
298 list_for_each(p, &h->list) {
299 c = list_entry(p, struct hci_conn, list);
300 if (c->handle == handle)
301 return c;
302 }
303 return NULL;
304 }
305
306 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
307 __u8 type, bdaddr_t *ba)
308 {
309 struct hci_conn_hash *h = &hdev->conn_hash;
310 struct list_head *p;
311 struct hci_conn *c;
312
313 list_for_each(p, &h->list) {
314 c = list_entry(p, struct hci_conn, list);
315 if (c->type == type && !bacmp(&c->dst, ba))
316 return c;
317 }
318 return NULL;
319 }
320
321 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
322 __u8 type, __u16 state)
323 {
324 struct hci_conn_hash *h = &hdev->conn_hash;
325 struct list_head *p;
326 struct hci_conn *c;
327
328 list_for_each(p, &h->list) {
329 c = list_entry(p, struct hci_conn, list);
330 if (c->type == type && c->state == state)
331 return c;
332 }
333 return NULL;
334 }
335
336 void hci_acl_connect(struct hci_conn *conn);
337 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
338 void hci_add_sco(struct hci_conn *conn, __u16 handle);
339 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
340 void hci_sco_setup(struct hci_conn *conn, __u8 status);
341
342 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
343 int hci_conn_del(struct hci_conn *conn);
344 void hci_conn_hash_flush(struct hci_dev *hdev);
345 void hci_conn_check_pending(struct hci_dev *hdev);
346
347 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
348 int hci_conn_check_link_mode(struct hci_conn *conn);
349 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
350 int hci_conn_change_link_key(struct hci_conn *conn);
351 int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
352
353 void hci_conn_enter_active_mode(struct hci_conn *conn);
354 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
355
356 void hci_conn_hold_device(struct hci_conn *conn);
357 void hci_conn_put_device(struct hci_conn *conn);
358
/* Take a reference on the connection and cancel any pending deferred
 * disconnect previously armed by hci_conn_put(). */
static inline void hci_conn_hold(struct hci_conn *conn)
{
        atomic_inc(&conn->refcnt);
        del_timer(&conn->disc_timer);
}
364
/*
 * Drop a reference on the connection.  When the last reference goes
 * away the link is not torn down immediately: the disconnect timer is
 * armed instead, leaving a grace period during which hci_conn_hold()
 * can revive the connection.
 *
 * For an ACL link in BT_CONNECTED state the timeout is
 * conn->disc_timeout milliseconds, doubled for incoming links
 * (!conn->out); in all other cases a short 10 ms timeout is used.
 */
static inline void hci_conn_put(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->refcnt)) {
                unsigned long timeo;
                if (conn->type == ACL_LINK) {
                        /* idle detection only matters while the link is held */
                        del_timer(&conn->idle_timer);
                        if (conn->state == BT_CONNECTED) {
                                timeo = msecs_to_jiffies(conn->disc_timeout);
                                if (!conn->out)
                                        timeo *= 2;     /* more patience for remote-initiated links */
                        } else
                                timeo = msecs_to_jiffies(10);
                } else
                        timeo = msecs_to_jiffies(10);
                mod_timer(&conn->disc_timer, jiffies + timeo);
        }
}
382
383 /* ----- HCI Devices ----- */
384 static inline void __hci_dev_put(struct hci_dev *d)
385 {
386 if (atomic_dec_and_test(&d->refcnt))
387 d->destruct(d);
388 }
389
/* Release a reference taken with hci_dev_hold(): drop the core refcount
 * first (which may run destruct()), then unpin the driver module. */
static inline void hci_dev_put(struct hci_dev *d)
{
        __hci_dev_put(d);
        module_put(d->owner);
}
395
/* Take a core reference on the device (no module pinning); returns d
 * so calls can be chained. */
static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
        atomic_inc(&d->refcnt);
        return d;
}
401
402 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
403 {
404 if (try_module_get(d->owner))
405 return __hci_dev_hold(d);
406 return NULL;
407 }
408
409 #define hci_dev_lock(d) spin_lock(&d->lock)
410 #define hci_dev_unlock(d) spin_unlock(&d->lock)
411 #define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
412 #define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
413
414 struct hci_dev *hci_dev_get(int index);
415 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
416
417 struct hci_dev *hci_alloc_dev(void);
418 void hci_free_dev(struct hci_dev *hdev);
419 int hci_register_dev(struct hci_dev *hdev);
420 int hci_unregister_dev(struct hci_dev *hdev);
421 int hci_suspend_dev(struct hci_dev *hdev);
422 int hci_resume_dev(struct hci_dev *hdev);
423 int hci_dev_open(__u16 dev);
424 int hci_dev_close(__u16 dev);
425 int hci_dev_reset(__u16 dev);
426 int hci_dev_reset_stat(__u16 dev);
427 int hci_dev_cmd(unsigned int cmd, void __user *arg);
428 int hci_get_dev_list(void __user *arg);
429 int hci_get_dev_info(void __user *arg);
430 int hci_get_conn_list(void __user *arg);
431 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
432 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
433 int hci_inquiry(void __user *arg);
434
435 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
436 int hci_blacklist_clear(struct hci_dev *hdev);
437
438 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
439
440 int hci_recv_frame(struct sk_buff *skb);
441 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
442 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
443
444 int hci_register_sysfs(struct hci_dev *hdev);
445 void hci_unregister_sysfs(struct hci_dev *hdev);
446 void hci_conn_init_sysfs(struct hci_conn *conn);
447 void hci_conn_add_sysfs(struct hci_conn *conn);
448 void hci_conn_del_sysfs(struct hci_conn *conn);
449
450 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
451
452 /* ----- LMP capabilities ----- */
453 #define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
454 #define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
455 #define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
456 #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
457 #define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
458 #define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR)
459
460 /* ----- HCI protocols ----- */
/*
 * Upper-protocol (L2CAP/SCO) hooks registered with the HCI core via
 * hci_register_proto(); id selects the slot in hci_proto[]
 * (HCI_PROTO_L2CAP or HCI_PROTO_SCO).  Any callback may be NULL; the
 * hci_proto_* dispatch helpers below check before calling.
 */
struct hci_proto {
        char *name;
        unsigned int id;                /* slot in hci_proto[] */
        unsigned long flags;

        void *priv;

        int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
        int (*connect_cfm) (struct hci_conn *conn, __u8 status);
        int (*disconn_ind) (struct hci_conn *conn);
        int (*disconn_cfm) (struct hci_conn *conn, __u8 reason);
        int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
        int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
        int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
};
476
477 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
478 {
479 register struct hci_proto *hp;
480 int mask = 0;
481
482 hp = hci_proto[HCI_PROTO_L2CAP];
483 if (hp && hp->connect_ind)
484 mask |= hp->connect_ind(hdev, bdaddr, type);
485
486 hp = hci_proto[HCI_PROTO_SCO];
487 if (hp && hp->connect_ind)
488 mask |= hp->connect_ind(hdev, bdaddr, type);
489
490 return mask;
491 }
492
493 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
494 {
495 register struct hci_proto *hp;
496
497 hp = hci_proto[HCI_PROTO_L2CAP];
498 if (hp && hp->connect_cfm)
499 hp->connect_cfm(conn, status);
500
501 hp = hci_proto[HCI_PROTO_SCO];
502 if (hp && hp->connect_cfm)
503 hp->connect_cfm(conn, status);
504 }
505
506 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
507 {
508 register struct hci_proto *hp;
509 int reason = 0x13;
510
511 hp = hci_proto[HCI_PROTO_L2CAP];
512 if (hp && hp->disconn_ind)
513 reason = hp->disconn_ind(conn);
514
515 hp = hci_proto[HCI_PROTO_SCO];
516 if (hp && hp->disconn_ind)
517 reason = hp->disconn_ind(conn);
518
519 return reason;
520 }
521
522 static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
523 {
524 register struct hci_proto *hp;
525
526 hp = hci_proto[HCI_PROTO_L2CAP];
527 if (hp && hp->disconn_cfm)
528 hp->disconn_cfm(conn, reason);
529
530 hp = hci_proto[HCI_PROTO_SCO];
531 if (hp && hp->disconn_cfm)
532 hp->disconn_cfm(conn, reason);
533 }
534
535 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
536 {
537 register struct hci_proto *hp;
538 __u8 encrypt;
539
540 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
541 return;
542
543 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
544
545 hp = hci_proto[HCI_PROTO_L2CAP];
546 if (hp && hp->security_cfm)
547 hp->security_cfm(conn, status, encrypt);
548
549 hp = hci_proto[HCI_PROTO_SCO];
550 if (hp && hp->security_cfm)
551 hp->security_cfm(conn, status, encrypt);
552 }
553
554 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
555 {
556 register struct hci_proto *hp;
557
558 hp = hci_proto[HCI_PROTO_L2CAP];
559 if (hp && hp->security_cfm)
560 hp->security_cfm(conn, status, encrypt);
561
562 hp = hci_proto[HCI_PROTO_SCO];
563 if (hp && hp->security_cfm)
564 hp->security_cfm(conn, status, encrypt);
565 }
566
567 int hci_register_proto(struct hci_proto *hproto);
568 int hci_unregister_proto(struct hci_proto *hproto);
569
570 /* ----- HCI callbacks ----- */
/* Security/role event callbacks registered via hci_register_cb().
 * Entries live on hci_cb_list and are invoked under the
 * hci_cb_list_lock read lock; any member may be NULL. */
struct hci_cb {
        struct list_head list;

        char *name;

        void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
        void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
        void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
580
581 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
582 {
583 struct list_head *p;
584 __u8 encrypt;
585
586 hci_proto_auth_cfm(conn, status);
587
588 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
589 return;
590
591 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
592
593 read_lock_bh(&hci_cb_list_lock);
594 list_for_each(p, &hci_cb_list) {
595 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
596 if (cb->security_cfm)
597 cb->security_cfm(conn, status, encrypt);
598 }
599 read_unlock_bh(&hci_cb_list_lock);
600 }
601
602 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
603 {
604 struct list_head *p;
605
606 if (conn->sec_level == BT_SECURITY_SDP)
607 conn->sec_level = BT_SECURITY_LOW;
608
609 hci_proto_encrypt_cfm(conn, status, encrypt);
610
611 read_lock_bh(&hci_cb_list_lock);
612 list_for_each(p, &hci_cb_list) {
613 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
614 if (cb->security_cfm)
615 cb->security_cfm(conn, status, encrypt);
616 }
617 read_unlock_bh(&hci_cb_list_lock);
618 }
619
620 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
621 {
622 struct list_head *p;
623
624 read_lock_bh(&hci_cb_list_lock);
625 list_for_each(p, &hci_cb_list) {
626 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
627 if (cb->key_change_cfm)
628 cb->key_change_cfm(conn, status);
629 }
630 read_unlock_bh(&hci_cb_list_lock);
631 }
632
633 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
634 {
635 struct list_head *p;
636
637 read_lock_bh(&hci_cb_list_lock);
638 list_for_each(p, &hci_cb_list) {
639 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
640 if (cb->role_switch_cfm)
641 cb->role_switch_cfm(conn, status, role);
642 }
643 read_unlock_bh(&hci_cb_list_lock);
644 }
645
646 int hci_register_cb(struct hci_cb *hcb);
647 int hci_unregister_cb(struct hci_cb *hcb);
648
649 int hci_register_notifier(struct notifier_block *nb);
650 int hci_unregister_notifier(struct notifier_block *nb);
651
652 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
653 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
654 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
655
656 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
657
658 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
659
660 /* ----- HCI Sockets ----- */
661 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
662
663 /* HCI info for socket */
664 #define hci_pi(sk) ((struct hci_pinfo *) sk)
665
/* Per-socket state for HCI sockets; bt must stay first so a struct sock
 * pointer can be cast with the hci_pi() macro above. */
struct hci_pinfo {
        struct bt_sock bt;              /* must be first member (hci_pi cast) */
        struct hci_dev *hdev;           /* device the socket is bound to */
        struct hci_filter filter;       /* per-socket packet/event filter */
        __u32 cmsg_mask;                /* presumably selects ancillary data to deliver — TODO confirm */
};
672
673 /* HCI security filter */
674 #define HCI_SFLT_MAX_OGF 5
675
/* Security filter for HCI sockets: bitmasks of permitted packet types,
 * events, and per-OGF command opcodes (128 OCF bits per OGF row). */
struct hci_sec_filter {
        __u32 type_mask;
        __u32 event_mask[2];
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
681
682 /* ----- HCI requests ----- */
683 #define HCI_REQ_DONE 0
684 #define HCI_REQ_PEND 1
685 #define HCI_REQ_CANCELED 2
686
687 #define hci_req_lock(d) mutex_lock(&d->req_lock)
688 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
689
690 void hci_req_complete(struct hci_dev *hdev, int result);
691
692 #endif /* __HCI_CORE_H */