Bluetooth: Add support for set_powered management command
include/net/bluetooth/hci_core.h
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};

struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};

struct bdaddr_list {
	struct list_head list;
	bdaddr_t bdaddr;
};
#define NUM_REASSEMBLY 4
struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		bus;
	__u8		dev_type;
	bdaddr_t	bdaddr;
	__u8		dev_name[248];
	__u8		dev_class[3];
	__u8		features[8];
	__u8		commands[64];
	__u8		ssp_mode;
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct workqueue_struct	*workqueue;

	struct work_struct	power_on;
	struct work_struct	power_off;
	struct timer_list	off_timer;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[NUM_REASSEMBLY];

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;
	__u16			req_last_cmd;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;
	struct list_head	blacklist;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t		promisc;

	struct dentry		*debugfs;

	struct device		*parent;
	struct device		dev;

	struct rfkill		*rfkill;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
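
/*
 * Illustrative sketch only (not part of the upstream header): a transport
 * driver would typically allocate, fill in and register an hci_dev roughly
 * as below.  The my_* callbacks, my_probe and driver_priv are hypothetical
 * names; the block is kept under #if 0 so the header itself is unaffected.
 */
#if 0
static int my_probe(struct device *parent, void *driver_priv)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();			/* zero-initialized hci_dev */
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;			/* transport this driver sits on */
	hdev->dev_type = HCI_BREDR;
	hdev->driver_data = driver_priv;	/* fetched back in the callbacks */
	hdev->owner = THIS_MODULE;

	hdev->open     = my_open;		/* bring the transport up */
	hdev->close    = my_close;		/* shut the transport down */
	hdev->flush    = my_flush;		/* drop pending TX data */
	hdev->send     = my_send;		/* transmit one HCI packet */
	hdev->destruct = my_destruct;		/* last reference dropped */

	SET_HCIDEV_DEV(hdev, parent);		/* sysfs parent device */

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
#endif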

struct hci_conn {
	struct list_head list;

	atomic_t	refcnt;
	spinlock_t	lock;

	bdaddr_t	dst;
	__u16		handle;
	__u16		state;
	__u8		mode;
	__u8		type;
	__u8		out;
	__u8		attempt;
	__u8		dev_class[3];
	__u8		features[8];
	__u8		ssp_mode;
	__u16		interval;
	__u16		pkt_type;
	__u16		link_policy;
	__u32		link_mode;
	__u8		auth_type;
	__u8		sec_level;
	__u8		pending_sec_level;
	__u8		power_save;
	__u16		disc_timeout;
	unsigned long	pend;

	unsigned int	sent;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work_add;
	struct work_struct work_del;

	struct device	dev;
	atomic_t	devref;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*priv;

	struct hci_conn *link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)		/* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)		/* 60 seconds */

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return c->list == NULL;
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
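
/*
 * Illustrative sketch only: the helpers above are typically used to decide
 * whether cached inquiry results are still fresh enough to reuse before
 * kicking off a new inquiry.  example_inquiry_is_stale() is a hypothetical
 * name and the block is not compiled.
 */
#if 0
static inline int example_inquiry_is_stale(struct hci_dev *hdev)
{
	int stale;

	hci_dev_lock_bh(hdev);
	stale = inquiry_cache_empty(hdev) ||
			inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX;
	hci_dev_unlock_bh(hdev);

	return stale;
}
#endif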

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
	HCI_CONN_SCO_SETUP_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
							__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
							__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
							__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}
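
/*
 * Illustrative sketch only: the lookup helpers above walk the connection
 * list without taking any lock themselves, so callers normally hold
 * hdev->lock around them, e.g. when an event handler resolves the ACL
 * connection for a remote address.  example_mark_connected() is a
 * hypothetical name and the block is not compiled.
 */
#if 0
static inline void example_mark_connected(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_conn *conn;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
	if (conn)
		conn->state = BT_CONNECTED;	/* example state update */
	hci_dev_unlock(hdev);
}
#endif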

void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);

static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
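
/*
 * Illustrative sketch only: hci_conn_hold()/hci_conn_put() bracket any use
 * of a connection that may outlive the current context; dropping the last
 * reference (re)arms disc_timer instead of freeing the connection directly.
 * example_use_conn() is a hypothetical name and the block is not compiled.
 */
#if 0
static inline void example_use_conn(struct hci_conn *conn)
{
	hci_conn_hold(conn);	/* take a reference, cancel the disconnect timer */
	/* ... use conn, possibly deferring work ... */
	hci_conn_put(conn);	/* drop the reference; may re-arm disc_timer */
}
#endif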

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}

#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);

void hci_del_off_timer(struct hci_dev *hdev);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)	((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)	((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)		((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev)	((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)		((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)		((dev)->features[6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev)	((dev)->features[6] & LMP_NO_FLUSH)
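
/*
 * Illustrative sketch only: the feature macros above gate optional
 * behaviour, e.g. only request sniff mode when both the controller and the
 * remote device advertise support for it.  example_try_sniff() is a
 * hypothetical name and the block is not compiled.
 */
#if 0
static inline void example_try_sniff(struct hci_dev *hdev, struct hci_conn *conn)
{
	if (lmp_sniff_capable(hdev) && lmp_sniff_capable(conn))
		hci_conn_enter_sniff_mode(conn);
}
#endif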

/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	register struct hci_proto *hp;
	int reason = 0x13;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	return reason;
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
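
/*
 * Illustrative sketch only: an upper protocol registers itself by filling in
 * an hci_proto and calling hci_register_proto(); the my_* handlers below are
 * hypothetical placeholders for what L2CAP or SCO provide, and the block is
 * not compiled.
 */
#if 0
static struct hci_proto my_hci_proto = {
	.name		= "MYPROTO",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= my_connect_ind,	/* accept/reject an incoming link */
	.connect_cfm	= my_connect_cfm,	/* link setup completed */
	.disconn_cfm	= my_disconn_cfm,	/* link went away */
	.recv_acldata	= my_recv_acldata,	/* inbound ACL payload */
	.security_cfm	= my_security_cfm,	/* authentication/encryption result */
};

static int __init my_init(void)
{
	return hci_register_proto(&my_hci_proto);
}
#endif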

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
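
/*
 * Illustrative sketch only: layers such as RFCOMM hook link-level security
 * events through an hci_cb.  my_security_cfm, my_module_init and
 * my_module_exit are hypothetical names and the block is not compiled.
 */
#if 0
static struct hci_cb my_cb = {
	.name		= "mycb",
	.security_cfm	= my_security_cfm,	/* invoked from hci_auth_cfm()/hci_encrypt_cfm() */
};

static int __init my_module_init(void)
{
	return hci_register_cb(&my_cb);
}

static void __exit my_module_exit(void)
{
	hci_unregister_cb(&my_cb);
}
#endif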

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
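
/*
 * Illustrative sketch only: commands are queued with hci_send_cmd() and
 * complete asynchronously through the event path, e.g. enabling page and
 * inquiry scan (HCI_OP_WRITE_SCAN_ENABLE and the SCAN_* bits come from
 * hci.h).  example_enable_scan() is a hypothetical name and the block is
 * not compiled.
 */
#if 0
static inline int example_enable_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
}
#endif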

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
					struct sock *skip_sk);

/* Management interface */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(u16 index);
int mgmt_index_removed(u16 index);
int mgmt_powered(u16 index, u8 powered);
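
/*
 * Illustrative sketch only: for the set_powered management command the HCI
 * core is expected to report power-state transitions back to the management
 * interface via mgmt_powered(); the exact call sites live in the core, but
 * the call looks roughly like this.  example_report_power() is a
 * hypothetical name and the block is not compiled.
 */
#if 0
static inline void example_report_power(struct hci_dev *hdev)
{
	mgmt_powered(hdev->id, test_bit(HCI_UP, &hdev->flags) ? 1 : 0);
}
#endif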

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);

#endif /* __HCI_CORE_H */