Bluetooth: Reject pairing requests when in non-pairable mode
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / include / net / bluetooth / hci_core.h
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 #ifndef __HCI_CORE_H
26 #define __HCI_CORE_H
27
28 #include <net/bluetooth/hci.h>
29
/* HCI upper protocols */
/* Indices into the global hci_proto[] registration table. */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1
/* HCI Core structures */

/* One remote device seen during inquiry, as reported by the controller. */
struct inquiry_data {
	bdaddr_t	bdaddr;			/* remote device address */
	__u8		pscan_rep_mode;		/* page scan repetition mode */
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];		/* class of device */
	__le16		clock_offset;		/* little-endian, as on the wire */
	__s8		rssi;			/* received signal strength */
	__u8		ssp_mode;		/* Secure Simple Pairing support */
};
45
/* Node of the singly-linked inquiry cache. */
struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;	/* jiffies when the entry was last updated */
	struct inquiry_data	data;
};
51
/* Per-adapter cache of recent inquiry results. */
struct inquiry_cache {
	spinlock_t		lock;		/* protects list; see inquiry_cache_lock() */
	__u32			timestamp;	/* jiffies of the last cache update */
	struct inquiry_entry	*list;		/* head of singly-linked entry list */
};
57
/* Set of active connections of one adapter, with per-type counters. */
struct hci_conn_hash {
	struct list_head	list;		/* all hci_conn objects of the adapter */
	spinlock_t		lock;
	unsigned int		acl_num;	/* number of ACL links */
	unsigned int		sco_num;	/* number of non-ACL (SCO/eSCO) links */
};
64
/* List node holding a single Bluetooth device address (used by the blacklist). */
struct bdaddr_list {
	struct list_head	list;
	bdaddr_t		bdaddr;
};
69
/* List node holding one 128-bit service UUID (hdev->uuids). */
struct bt_uuid {
	struct list_head	list;
	u8			uuid[16];
};
74
#define NUM_REASSEMBLY 4

/*
 * State of one HCI controller (a local Bluetooth adapter).
 * Lifetime is reference counted via refcnt; see hci_dev_hold()/hci_dev_put().
 * Most fields are protected by lock (hci_dev_lock()).
 */
struct hci_dev {
	struct list_head	list;		/* node in global hci_dev_list */
	spinlock_t		lock;
	atomic_t		refcnt;

	char			name[8];	/* e.g. "hci0" */
	unsigned long		flags;
	__u16			id;		/* device index */
	__u8			bus;
	__u8			dev_type;
	bdaddr_t		bdaddr;		/* local controller address */
	__u8			dev_name[248];	/* friendly name visible to remotes */
	__u8			dev_class[3];
	__u8			features[8];	/* LMP feature mask; see lmp_*_capable() */
	__u8			commands[64];	/* supported-commands bitmap */
	__u8			ssp_mode;
	__u8			hci_ver;
	__u16			hci_rev;
	__u16			manufacturer;
	__u16			voice_setting;

	__u16			pkt_type;
	__u16			esco_type;
	__u16			link_policy;
	__u16			link_mode;

	__u32			idle_timeout;
	__u16			sniff_min_interval;
	__u16			sniff_max_interval;

	unsigned long		quirks;

	/* Flow control: outstanding command credit and per-type packet budgets. */
	atomic_t		cmd_cnt;
	unsigned int		acl_cnt;
	unsigned int		sco_cnt;

	unsigned int		acl_mtu;
	unsigned int		sco_mtu;
	unsigned int		acl_pkts;
	unsigned int		sco_pkts;

	/* jiffies timestamps of the most recent transmissions. */
	unsigned long		cmd_last_tx;
	unsigned long		acl_last_tx;
	unsigned long		sco_last_tx;

	struct workqueue_struct	*workqueue;

	struct work_struct	power_on;
	struct work_struct	power_off;
	struct timer_list	off_timer;

	/* Tasklets draining the queues below in softirq context. */
	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;	/* last command sent; see hci_sent_cmd_data() */
	struct sk_buff		*reassembly[NUM_REASSEMBLY];	/* partially received frames */

	/* Synchronous request machinery; serialised by req_lock (hci_req_lock()). */
	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;	/* HCI_REQ_DONE/PEND/CANCELED */
	__u32			req_result;
	__u16			req_last_cmd;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;
	struct list_head	blacklist;	/* struct bdaddr_list entries */

	struct list_head	uuids;		/* struct bt_uuid entries */

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;	/* owned by the transport driver */
	void			*core_data;

	atomic_t		promisc;	/* NOTE(review): presumably a count of
						 * raw-sniffing sockets — confirm */

	struct dentry		*debugfs;

	struct device		*parent;	/* set via SET_HCIDEV_DEV() */
	struct device		dev;

	struct rfkill		*rfkill;

	struct module		*owner;		/* driver module, pinned by hci_dev_hold() */

	/* Transport driver operations. */
	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);	/* called on last __hci_dev_put() */
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
176
/*
 * One link (ACL, SCO or eSCO) to a remote device.
 * Reference counted; see hci_conn_hold()/hci_conn_put().
 */
struct hci_conn {
	struct list_head	list;		/* node in hdev->conn_hash.list */

	atomic_t		refcnt;
	spinlock_t		lock;

	bdaddr_t		dst;		/* remote device address */
	__u16			handle;		/* connection handle from the controller */
	__u16			state;		/* e.g. BT_CONNECTED */
	__u8			mode;
	__u8			type;		/* ACL_LINK or a SCO variant */
	__u8			out;		/* non-zero for outgoing connections */
	__u8			attempt;
	__u8			dev_class[3];
	__u8			features[8];	/* remote LMP features */
	__u8			ssp_mode;
	__u16			interval;
	__u16			pkt_type;
	__u16			link_policy;
	__u32			link_mode;	/* HCI_LM_* bits (e.g. HCI_LM_ENCRYPT) */
	__u8			auth_type;
	__u8			sec_level;	/* current security level */
	__u8			pending_sec_level;
	__u8			power_save;
	__u16			disc_timeout;	/* ms before idle disconnect; see hci_conn_put() */
	unsigned long		pend;		/* HCI_CONN_*_PEND bit flags */

	/* Pairing parameters advertised by the remote side. */
	__u8			remote_cap;
	__u8			remote_oob;
	__u8			remote_auth;

	unsigned int		sent;		/* NOTE(review): presumably packets sent
						 * but not yet completed — confirm */

	struct sk_buff_head	data_q;

	struct timer_list	disc_timer;	/* delayed disconnect, armed by hci_conn_put() */
	struct timer_list	idle_timer;

	struct work_struct	work_add;
	struct work_struct	work_del;

	struct device		dev;
	atomic_t		devref;		/* see hci_conn_hold_device()/put_device() */

	struct hci_dev		*hdev;		/* owning adapter */
	void			*l2cap_data;	/* per-protocol private state */
	void			*sco_data;
	void			*priv;

	struct hci_conn		*link;		/* associated link (e.g. ACL behind a SCO) */
};
228
/* Global protocol table plus device/callback registries and their locks. */
extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
234
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)	/* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)	/* 60 seconds */

/* Lock helpers for struct inquiry_cache.  The argument is fully
 * parenthesized so any expression (e.g. &hdev->inq_cache) expands
 * correctly — the previous form broke for non-trivial arguments. */
#define inquiry_cache_lock(c)		spin_lock(&(c)->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&(c)->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&(c)->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&(c)->lock)
243
244 static inline void inquiry_cache_init(struct hci_dev *hdev)
245 {
246 struct inquiry_cache *c = &hdev->inq_cache;
247 spin_lock_init(&c->lock);
248 c->list = NULL;
249 }
250
251 static inline int inquiry_cache_empty(struct hci_dev *hdev)
252 {
253 struct inquiry_cache *c = &hdev->inq_cache;
254 return c->list == NULL;
255 }
256
257 static inline long inquiry_cache_age(struct hci_dev *hdev)
258 {
259 struct inquiry_cache *c = &hdev->inq_cache;
260 return jiffies - c->timestamp;
261 }
262
263 static inline long inquiry_entry_age(struct inquiry_entry *e)
264 {
265 return jiffies - e->timestamp;
266 }
267
/* Find the cached inquiry entry for @bdaddr; NULL when not cached. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
/* Insert a new inquiry result into the cache, or refresh an existing one. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
270
/* ----- HCI Connections ----- */

/* Bit numbers for hci_conn->pend: operations still awaiting completion. */
enum {
	HCI_CONN_AUTH_PEND,		/* authentication outstanding */
	HCI_CONN_ENCRYPT_PEND,		/* encryption change outstanding */
	HCI_CONN_RSWITCH_PEND,		/* role switch outstanding */
	HCI_CONN_MODE_CHANGE_PEND,	/* mode change outstanding */
	HCI_CONN_SCO_SETUP_PEND,	/* SCO setup outstanding */
};
279
280 static inline void hci_conn_hash_init(struct hci_dev *hdev)
281 {
282 struct hci_conn_hash *h = &hdev->conn_hash;
283 INIT_LIST_HEAD(&h->list);
284 spin_lock_init(&h->lock);
285 h->acl_num = 0;
286 h->sco_num = 0;
287 }
288
289 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
290 {
291 struct hci_conn_hash *h = &hdev->conn_hash;
292 list_add(&c->list, &h->list);
293 if (c->type == ACL_LINK)
294 h->acl_num++;
295 else
296 h->sco_num++;
297 }
298
299 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
300 {
301 struct hci_conn_hash *h = &hdev->conn_hash;
302 list_del(&c->list);
303 if (c->type == ACL_LINK)
304 h->acl_num--;
305 else
306 h->sco_num--;
307 }
308
309 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
310 __u16 handle)
311 {
312 struct hci_conn_hash *h = &hdev->conn_hash;
313 struct list_head *p;
314 struct hci_conn *c;
315
316 list_for_each(p, &h->list) {
317 c = list_entry(p, struct hci_conn, list);
318 if (c->handle == handle)
319 return c;
320 }
321 return NULL;
322 }
323
324 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
325 __u8 type, bdaddr_t *ba)
326 {
327 struct hci_conn_hash *h = &hdev->conn_hash;
328 struct list_head *p;
329 struct hci_conn *c;
330
331 list_for_each(p, &h->list) {
332 c = list_entry(p, struct hci_conn, list);
333 if (c->type == type && !bacmp(&c->dst, ba))
334 return c;
335 }
336 return NULL;
337 }
338
339 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
340 __u8 type, __u16 state)
341 {
342 struct hci_conn_hash *h = &hdev->conn_hash;
343 struct list_head *p;
344 struct hci_conn *c;
345
346 list_for_each(p, &h->list) {
347 c = list_entry(p, struct hci_conn, list);
348 if (c->type == type && c->state == state)
349 return c;
350 }
351 return NULL;
352 }
353
/* Low-level link operations toward the controller. */
void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

/* Connection object lifecycle. */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

/* Establish (or reuse) a link to @dst with the requested security. */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

/* Power-mode (sniff/active) management. */
void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

/* Reference counting of the connection's embedded device (conn->devref). */
void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);
376
/*
 * Take a reference on @conn and cancel any pending delayed disconnect.
 * Paired with hci_conn_put().
 */
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}
382
/*
 * Drop a reference on @conn.  The last put does not free the connection;
 * it arms the disconnect timer so the idle link is torn down after a
 * grace period (disc_timeout ms for established ACL links, 10 ms else).
 */
static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				/* incoming links get twice the grace period */
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
400
/* ----- HCI Devices ----- */
/* Drop a device reference; the driver destructor runs on the last put. */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}
407
/*
 * Drop a device reference and release the owning driver module.
 * NOTE(review): d->owner is read after __hci_dev_put() may have run the
 * destructor — safe only if destruct() does not free @d itself; confirm.
 */
static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}
413
414 static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
415 {
416 atomic_inc(&d->refcnt);
417 return d;
418 }
419
420 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
421 {
422 if (try_module_get(d->owner))
423 return __hci_dev_hold(d);
424 return NULL;
425 }
426
/* Device lock helpers.  The argument is fully parenthesized so any
 * expression expands correctly — the previous form only worked for a
 * plain identifier. */
#define hci_dev_lock(d)		spin_lock(&(d)->lock)
#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
431
struct hci_dev *hci_dev_get(int index);
/* Pick a local adapter suitable for reaching @dst. */
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

/* Device registration and power management (driver-facing API). */
struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);

/* ioctl-style control interface; @arg points into userspace memory. */
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

/* Blacklist and UUID list management. */
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);

int hci_uuids_clear(struct hci_dev *hdev);

void hci_del_off_timer(struct hci_dev *hdev);

/* Entry point for HCI event packets received from the controller. */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

/* Frame reception interface used by transport drivers. */
int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);

/* sysfs plumbing for devices and connections. */
int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

/* Record the parent (transport) device of @hdev for sysfs placement. */
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
473
/* ----- LMP capabilities ----- */
/* Feature-bit tests against ->features[] (usable on hci_dev and hci_conn). */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev)  ((dev)->features[6] & LMP_NO_FLUSH)
482
/* ----- HCI protocols ----- */

/*
 * Hooks of an upper protocol (L2CAP or SCO), registered via
 * hci_register_proto() into hci_proto[] indexed by HCI_PROTO_*.
 */
struct hci_proto {
	char		*name;
	unsigned int	id;		/* HCI_PROTO_L2CAP or HCI_PROTO_SCO */
	unsigned long	flags;

	void		*priv;

	/* Incoming connection request; result is OR-ed into an accept mask. */
	int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm) (struct hci_conn *conn, __u8 status);
	int (*disconn_ind) (struct hci_conn *conn);	/* returns disconnect reason */
	int (*disconn_cfm) (struct hci_conn *conn, __u8 reason);
	int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
};
499
500 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
501 {
502 register struct hci_proto *hp;
503 int mask = 0;
504
505 hp = hci_proto[HCI_PROTO_L2CAP];
506 if (hp && hp->connect_ind)
507 mask |= hp->connect_ind(hdev, bdaddr, type);
508
509 hp = hci_proto[HCI_PROTO_SCO];
510 if (hp && hp->connect_ind)
511 mask |= hp->connect_ind(hdev, bdaddr, type);
512
513 return mask;
514 }
515
516 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
517 {
518 register struct hci_proto *hp;
519
520 hp = hci_proto[HCI_PROTO_L2CAP];
521 if (hp && hp->connect_cfm)
522 hp->connect_cfm(conn, status);
523
524 hp = hci_proto[HCI_PROTO_SCO];
525 if (hp && hp->connect_cfm)
526 hp->connect_cfm(conn, status);
527 }
528
529 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
530 {
531 register struct hci_proto *hp;
532 int reason = 0x13;
533
534 hp = hci_proto[HCI_PROTO_L2CAP];
535 if (hp && hp->disconn_ind)
536 reason = hp->disconn_ind(conn);
537
538 hp = hci_proto[HCI_PROTO_SCO];
539 if (hp && hp->disconn_ind)
540 reason = hp->disconn_ind(conn);
541
542 return reason;
543 }
544
545 static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
546 {
547 register struct hci_proto *hp;
548
549 hp = hci_proto[HCI_PROTO_L2CAP];
550 if (hp && hp->disconn_cfm)
551 hp->disconn_cfm(conn, reason);
552
553 hp = hci_proto[HCI_PROTO_SCO];
554 if (hp && hp->disconn_cfm)
555 hp->disconn_cfm(conn, reason);
556 }
557
558 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
559 {
560 register struct hci_proto *hp;
561 __u8 encrypt;
562
563 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
564 return;
565
566 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
567
568 hp = hci_proto[HCI_PROTO_L2CAP];
569 if (hp && hp->security_cfm)
570 hp->security_cfm(conn, status, encrypt);
571
572 hp = hci_proto[HCI_PROTO_SCO];
573 if (hp && hp->security_cfm)
574 hp->security_cfm(conn, status, encrypt);
575 }
576
577 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
578 {
579 register struct hci_proto *hp;
580
581 hp = hci_proto[HCI_PROTO_L2CAP];
582 if (hp && hp->security_cfm)
583 hp->security_cfm(conn, status, encrypt);
584
585 hp = hci_proto[HCI_PROTO_SCO];
586 if (hp && hp->security_cfm)
587 hp->security_cfm(conn, status, encrypt);
588 }
589
/* Register/unregister an upper protocol in the hci_proto[] table. */
int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
592
/* ----- HCI callbacks ----- */

/*
 * Security-event callbacks.  Registered on hci_cb_list via
 * hci_register_cb(); invoked under the read side of hci_cb_list_lock.
 */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
603
604 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
605 {
606 struct list_head *p;
607 __u8 encrypt;
608
609 hci_proto_auth_cfm(conn, status);
610
611 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
612 return;
613
614 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
615
616 read_lock_bh(&hci_cb_list_lock);
617 list_for_each(p, &hci_cb_list) {
618 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
619 if (cb->security_cfm)
620 cb->security_cfm(conn, status, encrypt);
621 }
622 read_unlock_bh(&hci_cb_list_lock);
623 }
624
625 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
626 {
627 struct list_head *p;
628
629 if (conn->sec_level == BT_SECURITY_SDP)
630 conn->sec_level = BT_SECURITY_LOW;
631
632 hci_proto_encrypt_cfm(conn, status, encrypt);
633
634 read_lock_bh(&hci_cb_list_lock);
635 list_for_each(p, &hci_cb_list) {
636 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
637 if (cb->security_cfm)
638 cb->security_cfm(conn, status, encrypt);
639 }
640 read_unlock_bh(&hci_cb_list_lock);
641 }
642
643 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
644 {
645 struct list_head *p;
646
647 read_lock_bh(&hci_cb_list_lock);
648 list_for_each(p, &hci_cb_list) {
649 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
650 if (cb->key_change_cfm)
651 cb->key_change_cfm(conn, status);
652 }
653 read_unlock_bh(&hci_cb_list_lock);
654 }
655
656 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
657 {
658 struct list_head *p;
659
660 read_lock_bh(&hci_cb_list_lock);
661 list_for_each(p, &hci_cb_list) {
662 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
663 if (cb->role_switch_cfm)
664 cb->role_switch_cfm(conn, status, role);
665 }
666 read_unlock_bh(&hci_cb_list_lock);
667 }
668
/* Register/unregister a security-event callback set. */
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

/* Queue an HCI command with @plen bytes of parameters for transmission. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

/* Parameter data of the pending command, if its opcode matches; else NULL. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
/* Deliver @skb to all listening HCI sockets except @skip_sk. */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
					struct sock *skip_sk);

/* Management interface */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(u16 index);
int mgmt_index_removed(u16 index);
int mgmt_powered(u16 index, u8 powered);
int mgmt_discoverable(u16 index, u8 discoverable);
int mgmt_connectable(u16 index, u8 connectable);
694
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state for HCI sockets. */
struct hci_pinfo {
	struct bt_sock	bt;		/* must stay first: hci_pi() casts sk directly */
	struct hci_dev	*hdev;		/* bound device, if any */
	struct hci_filter filter;
	__u32		cmsg_mask;
	unsigned short	channel;
};
705
/* HCI security filter */
#define HCI_SFLT_MAX_OGF 5

/* Bitmasks describing which packet types, events and commands (per OGF)
 * an unprivileged HCI socket is allowed to use. */
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
714
715 /* ----- HCI requests ----- */
716 #define HCI_REQ_DONE 0
717 #define HCI_REQ_PEND 1
718 #define HCI_REQ_CANCELED 2
719
720 #define hci_req_lock(d) mutex_lock(&d->req_lock)
721 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
722
723 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);
724
725 #endif /* __HCI_CORE_H */