[Bluetooth] Make use of virtual devices tree
[GitHub/mt8127/android_kernel_alcatel_ttab.git] include/net/bluetooth/hci_core.h
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t bdaddr;
	__u8 pscan_rep_mode;
	__u8 pscan_period_mode;
	__u8 pscan_mode;
	__u8 dev_class[3];
	__le16 clock_offset;
	__s8 rssi;
};

struct inquiry_entry {
	struct inquiry_entry *next;
	__u32 timestamp;
	struct inquiry_data data;
};

struct inquiry_cache {
	spinlock_t lock;
	__u32 timestamp;
	struct inquiry_entry *list;
};

struct hci_conn_hash {
	struct list_head list;
	spinlock_t lock;
	unsigned int acl_num;
	unsigned int sco_num;
};

struct hci_dev {
	struct list_head list;
	spinlock_t lock;
	atomic_t refcnt;

	char name[8];
	unsigned long flags;
	__u16 id;
	__u8 type;
	bdaddr_t bdaddr;
	__u8 features[8];
	__u8 hci_ver;
	__u16 hci_rev;
	__u16 manufacturer;
	__u16 voice_setting;

	__u16 pkt_type;
	__u16 link_policy;
	__u16 link_mode;

	__u32 idle_timeout;
	__u16 sniff_min_interval;
	__u16 sniff_max_interval;

	unsigned long quirks;

	atomic_t cmd_cnt;
	unsigned int acl_cnt;
	unsigned int sco_cnt;

	unsigned int acl_mtu;
	unsigned int sco_mtu;
	unsigned int acl_pkts;
	unsigned int sco_pkts;

	unsigned long cmd_last_tx;
	unsigned long acl_last_tx;
	unsigned long sco_last_tx;

	struct tasklet_struct cmd_task;
	struct tasklet_struct rx_task;
	struct tasklet_struct tx_task;

	struct sk_buff_head rx_q;
	struct sk_buff_head raw_q;
	struct sk_buff_head cmd_q;

	struct sk_buff *sent_cmd;

	struct semaphore req_lock;
	wait_queue_head_t req_wait_q;
	__u32 req_status;
	__u32 req_result;

	struct inquiry_cache inq_cache;
	struct hci_conn_hash conn_hash;

	struct hci_dev_stats stat;

	struct sk_buff_head driver_init;

	void *driver_data;
	void *core_data;

	atomic_t promisc;

	struct device *parent;
	struct device dev;

	struct module *owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

struct hci_conn {
	struct list_head list;

	atomic_t refcnt;
	spinlock_t lock;

	bdaddr_t dst;
	__u16 handle;
	__u16 state;
	__u8 mode;
	__u8 type;
	__u8 out;
	__u8 dev_class[3];
	__u8 features[8];
	__u16 interval;
	__u16 link_policy;
	__u32 link_mode;
	__u8 power_save;
	unsigned long pend;

	unsigned int sent;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work;

	struct device dev;

	struct hci_dev *hdev;
	void *l2cap_data;
	void *sco_data;
	void *priv;

	struct hci_conn *link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds
#define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds

#define inquiry_cache_lock(c) spin_lock(&c->lock)
#define inquiry_cache_unlock(c) spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
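
/*
 * Illustrative sketch (not from the original header): one way a caller might
 * consult the inquiry cache filled in by hci_inquiry_cache_update(). The
 * helper name is hypothetical, and using hci_dev_lock_bh() (defined further
 * down) to serialize against the event path is an assumption about context.
 */
#if 0	/* example only, not built */
static int my_peer_seen_recently(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_entry *e;
	int recent = 0;

	hci_dev_lock_bh(hdev);
	e = hci_inquiry_cache_lookup(hdev, bdaddr);
	if (e && inquiry_entry_age(e) < INQUIRY_ENTRY_AGE_MAX)
		recent = 1;
	hci_dev_unlock_bh(hdev);

	return recent;
}
#endif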

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}
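
/*
 * Illustrative sketch (not from the original header): looking up an
 * established ACL connection by remote address. Taking hci_dev_lock_bh()
 * around the lookup mirrors how the upper protocols use these helpers;
 * the function name is hypothetical.
 */
#if 0	/* example only, not built */
static int my_acl_is_connected(struct hci_dev *hdev, bdaddr_t *dst)
{
	struct hci_conn *conn;
	int up = 0;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (conn && conn->state == BT_CONNECTED)
		up = 1;
	hci_dev_unlock_bh(hdev);

	return up;
}
#endif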

void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
int hci_conn_auth(struct hci_conn *conn);
int hci_conn_encrypt(struct hci_conn *conn);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
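
/*
 * Note on the refcounting above (not from the original header):
 * hci_conn_hold() pins the link and cancels any pending disconnect, while
 * the final hci_conn_put() re-arms disc_timer so the link is torn down after
 * HCI_DISCONN_TIMEOUT (doubled for incoming ACL links, and only about 10 ms
 * for SCO links or links that never reached BT_CONNECTED). Upper protocols
 * are expected to hold the connection for as long as they have channels on
 * it and drop the reference when the last channel closes.
 */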

/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}

#define hci_dev_lock(d) spin_lock(&d->lock)
#define hci_dev_unlock(d) spin_unlock(&d->lock)
#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
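
/*
 * Illustrative sketch (not from the original header): how a transport driver
 * typically feeds a received HCI packet into the core via hci_recv_frame().
 * The function name is hypothetical; bt_skb_alloc(), bt_cb() and
 * HCI_EVENT_PKT come from <net/bluetooth/bluetooth.h> and
 * <net/bluetooth/hci.h>.
 */
#if 0	/* example only, not built */
static int my_deliver_event(struct hci_dev *hdev, void *data, int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	memcpy(skb_put(skb, len), data, len);

	return hci_recv_frame(skb);	/* queues on hdev->rx_q and schedules rx_task */
}
#endif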

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
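
/*
 * Illustrative sketch (not from the original header): the usual shape of a
 * transport driver allocating, wiring up and registering an hci_dev, and
 * hooking it into the device tree via SET_HCIDEV_DEV(). Everything prefixed
 * "my_" is hypothetical; HCI_VIRTUAL and HCI_RUNNING come from hci.h.
 */
#if 0	/* example only, not built */
static int my_open(struct hci_dev *hdev)
{
	set_bit(HCI_RUNNING, &hdev->flags);
	return 0;
}

static int my_close(struct hci_dev *hdev)
{
	clear_bit(HCI_RUNNING, &hdev->flags);
	return 0;
}

static int my_send(struct sk_buff *skb)
{
	/* push the frame to the hardware; type is in bt_cb(skb)->pkt_type */
	kfree_skb(skb);
	return 0;
}

static void my_destruct(struct hci_dev *hdev)
{
	/* free any driver private data hanging off hdev->driver_data */
}

static int my_probe(struct device *parent)
{
	struct hci_dev *hdev = hci_alloc_dev();
	int err;

	if (!hdev)
		return -ENOMEM;

	hdev->type     = HCI_VIRTUAL;	/* or HCI_USB, HCI_UART, ... */
	hdev->owner    = THIS_MODULE;
	hdev->open     = my_open;
	hdev->close    = my_close;
	hdev->send     = my_send;
	hdev->destruct = my_destruct;

	SET_HCIDEV_DEV(hdev, parent);	/* parent in the driver model tree */

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}
	return 0;
}
#endif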

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
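
/*
 * Illustrative sketch (not from the original header): the capability macros
 * above work on anything with a features[8] array, i.e. both hci_dev and
 * hci_conn, so local and remote feature masks are tested the same way. The
 * helper name is hypothetical.
 */
#if 0	/* example only, not built */
static void my_maybe_enter_sniff(struct hci_dev *hdev, struct hci_conn *conn)
{
	/* both the adapter and the remote side must support sniff mode */
	if (lmp_sniff_capable(hdev) && lmp_sniff_capable(conn))
		hci_conn_enter_sniff_mode(conn);
}
#endif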
1da177e4
LT
435
436/* ----- HCI protocols ----- */
437struct hci_proto {
438 char *name;
439 unsigned int id;
440 unsigned long flags;
441
442 void *priv;
443
444 int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
445 int (*connect_cfm) (struct hci_conn *conn, __u8 status);
446 int (*disconn_ind) (struct hci_conn *conn, __u8 reason);
447 int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
448 int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
449 int (*auth_cfm) (struct hci_conn *conn, __u8 status);
450 int (*encrypt_cfm) (struct hci_conn *conn, __u8 status);
451};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
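
/*
 * Illustrative sketch (not from the original header): registering an upper
 * protocol. Only two slots exist (HCI_PROTO_L2CAP and HCI_PROTO_SCO), and the
 * core dispatches the *_ind/*_cfm hooks above to whatever is registered in
 * each slot. The names below are hypothetical; HCI_LM_ACCEPT comes from hci.h.
 */
#if 0	/* example only, not built */
static int my_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	/* returned bits form the accept mask for the incoming connection */
	return HCI_LM_ACCEPT;
}

static int my_connect_cfm(struct hci_conn *conn, __u8 status)
{
	return 0;
}

static struct hci_proto my_proto = {
	.name		= "MYPROTO",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= my_connect_ind,
	.connect_cfm	= my_connect_cfm,
};

static int __init my_proto_init(void)
{
	return hci_register_proto(&my_proto);
}

static void __exit my_proto_exit(void)
{
	hci_unregister_proto(&my_proto);
}
#endif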

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*auth_cfm) (struct hci_conn *conn, __u8 status);
	void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	hci_proto_auth_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->auth_cfm)
			cb->auth_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	hci_proto_encrypt_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->encrypt_cfm)
			cb->encrypt_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
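
/*
 * Illustrative sketch (not from the original header): an upper layer that
 * cares about link security registers an hci_cb to be told about
 * authentication, encryption, key change and role switch results on any
 * connection. The callback names are hypothetical.
 */
#if 0	/* example only, not built */
static void my_auth_cfm(struct hci_conn *conn, __u8 status)
{
	/* resume whatever was waiting for authentication on this link */
}

static void my_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	/* encrypt is the new encryption state reported by the controller */
}

static struct hci_cb my_cb = {
	.name		= "MYLAYER",
	.auth_cfm	= my_auth_cfm,
	.encrypt_cfm	= my_encrypt_cfm,
};

static int __init my_cb_init(void)
{
	return hci_register_cb(&my_cb);
}

static void __exit my_cb_exit(void)
{
	hci_unregister_cb(&my_cb);
}
#endif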

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;
	struct hci_dev *hdev;
	struct hci_filter filter;
	__u32 cmsg_mask;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2

#define hci_req_lock(d) down(&d->req_lock)
#define hci_req_unlock(d) up(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */