7451a9c92d9d5d06733498a6baaf43dd26ddc186
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / include / net / bluetooth / hci_core.h
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 #ifndef __HCI_CORE_H
26 #define __HCI_CORE_H
27
28 #include <net/bluetooth/hci.h>
29
30 /* HCI upper protocols */
31 #define HCI_PROTO_L2CAP 0
32 #define HCI_PROTO_SCO 1
33
34 /* HCI Core structures */
/* One remote device's inquiry response, as parsed from an HCI
 * inquiry-result event. */
struct inquiry_data {
	bdaddr_t	bdaddr;			/* remote device address */
	__u8		pscan_rep_mode;		/* page scan repetition mode */
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];		/* class of device */
	__le16		clock_offset;		/* little-endian, as received on the wire */
	__s8		rssi;			/* signal strength of the response */
};
44
/* Node of the singly-linked inquiry cache list.
 * @timestamp: jiffies when this entry's data was last refreshed
 *             (see inquiry_entry_age()). */
struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};
50
/* Per-adapter cache of recent inquiry results.
 * @lock guards @list; @timestamp is the jiffies of the last cache
 * update (see inquiry_cache_age()). */
struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;		/* singly-linked, NULL when empty */
};
56
/* List of active connections on an adapter, plus per-link-type
 * counters kept in sync by hci_conn_hash_add()/hci_conn_hash_del(). */
struct hci_conn_hash {
	struct list_head	list;		/* of struct hci_conn */
	spinlock_t		lock;
	unsigned int		acl_num;	/* number of ACL links */
	unsigned int		sco_num;	/* number of SCO links */
};
63
/*
 * State for one registered HCI controller.
 *
 * Lifetime is reference-counted via hci_dev_hold()/hci_dev_put(); the
 * driver callbacks at the bottom are supplied by the transport driver
 * before hci_register_dev().
 */
struct hci_dev {
	struct list_head list;		/* node in hci_dev_list */
	spinlock_t	lock;		/* see hci_dev_lock()/hci_dev_unlock() */
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;		/* HCI_UP, HCI_INIT, ... bits */
	__u16		id;
	__u8		type;
	bdaddr_t	bdaddr;		/* local adapter address */
	__u8		features[8];	/* LMP feature mask (see lmp_*_capable()) */
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;

	/* Flow control: how many commands/packets the controller can
	 * still accept. */
	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;	/* jiffies of last transmission */
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct tasklet_struct	cmd_task;	/* scheduled by hci_sched_cmd() */
	struct tasklet_struct	rx_task;	/* scheduled by hci_sched_rx() */
	struct tasklet_struct	tx_task;	/* scheduled by hci_sched_tx() */

	struct sk_buff_head	rx_q;		/* frames queued by hci_recv_frame() */
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;	/* last command sent; see hci_sent_cmd_data() */

	/* Synchronous request machinery (see hci_req_lock()/HCI_REQ_*). */
	struct semaphore	req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;	/* owned by the transport driver */
	void			*core_data;

	atomic_t		promisc;

	struct device		*parent;	/* set via SET_HCIDEV_DEV() */
	struct device		dev;

	struct module		*owner;		/* pinned by hci_dev_hold() */

	/* Transport driver operations. */
	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);	/* called on last __hci_dev_put() */
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
143
/*
 * One link to a remote device.  Reference-counted via
 * hci_conn_hold()/hci_conn_put(); the disconnect timer is armed when
 * the last reference is dropped rather than tearing down immediately.
 */
struct hci_conn {
	struct list_head list;		/* node in hdev->conn_hash.list */

	atomic_t	 refcnt;
	spinlock_t	 lock;

	bdaddr_t	 dst;		/* remote device address */
	__u16		 handle;	/* HCI connection handle */
	__u16		 state;
	__u8		 mode;
	__u8		 type;		/* ACL_LINK vs. SCO-type link */
	__u8		 out;		/* outgoing-connection flag (see hci_conn_put()) */
	__u8		 dev_class[3];
	__u8		 features[8];
	__u16		 interval;
	__u16		 link_policy;
	__u32		 link_mode;
	__u8		 power_save;
	unsigned long	 pend;		/* HCI_CONN_*_PEND bits */

	unsigned int	 sent;		/* presumably in-flight packet count — confirm in hci_core.c */

	struct sk_buff_head data_q;	/* outbound data queue */

	struct timer_list disc_timer;	/* delayed disconnect; armed in hci_conn_put() */
	struct timer_list idle_timer;

	struct work_struct work;

	struct device	 dev;

	struct hci_dev	 *hdev;		/* owning adapter */
	void		 *l2cap_data;	/* per-protocol private data */
	void		 *sco_data;
	void		 *priv;

	struct hci_conn	 *link;		/* related connection, if any */
};
182
/* Global registries defined in hci_core.c: registered upper protocols,
 * all adapters, and callback modules, with their guarding rwlocks. */
extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
188
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)	/* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)	/* 60 seconds */

/* The macro argument is parenthesized so any expression (not just a
 * simple identifier) can be passed safely. */
#define inquiry_cache_lock(c)		spin_lock(&(c)->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&(c)->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&(c)->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&(c)->lock)
197
198 static inline void inquiry_cache_init(struct hci_dev *hdev)
199 {
200 struct inquiry_cache *c = &hdev->inq_cache;
201 spin_lock_init(&c->lock);
202 c->list = NULL;
203 }
204
205 static inline int inquiry_cache_empty(struct hci_dev *hdev)
206 {
207 struct inquiry_cache *c = &hdev->inq_cache;
208 return (c->list == NULL);
209 }
210
211 static inline long inquiry_cache_age(struct hci_dev *hdev)
212 {
213 struct inquiry_cache *c = &hdev->inq_cache;
214 return jiffies - c->timestamp;
215 }
216
/* Jiffies elapsed since entry @e was last refreshed. */
static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}
221
222 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
223 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
224
225 /* ----- HCI Connections ----- */
/* Bit numbers for hci_conn->pend — operations awaiting a controller
 * event (presumably manipulated with set_bit/test_bit; confirm in
 * hci_conn.c/hci_event.c). */
enum {
	HCI_CONN_AUTH_PEND,		/* authentication outstanding */
	HCI_CONN_ENCRYPT_PEND,		/* encryption change outstanding */
	HCI_CONN_RSWITCH_PEND,		/* role switch outstanding */
	HCI_CONN_MODE_CHANGE_PEND,	/* sniff/active mode change outstanding */
};
232
233 static inline void hci_conn_hash_init(struct hci_dev *hdev)
234 {
235 struct hci_conn_hash *h = &hdev->conn_hash;
236 INIT_LIST_HEAD(&h->list);
237 spin_lock_init(&h->lock);
238 h->acl_num = 0;
239 h->sco_num = 0;
240 }
241
242 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
243 {
244 struct hci_conn_hash *h = &hdev->conn_hash;
245 list_add(&c->list, &h->list);
246 if (c->type == ACL_LINK)
247 h->acl_num++;
248 else
249 h->sco_num++;
250 }
251
252 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
253 {
254 struct hci_conn_hash *h = &hdev->conn_hash;
255 list_del(&c->list);
256 if (c->type == ACL_LINK)
257 h->acl_num--;
258 else
259 h->sco_num--;
260 }
261
262 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
263 __u16 handle)
264 {
265 struct hci_conn_hash *h = &hdev->conn_hash;
266 struct list_head *p;
267 struct hci_conn *c;
268
269 list_for_each(p, &h->list) {
270 c = list_entry(p, struct hci_conn, list);
271 if (c->handle == handle)
272 return c;
273 }
274 return NULL;
275 }
276
277 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
278 __u8 type, bdaddr_t *ba)
279 {
280 struct hci_conn_hash *h = &hdev->conn_hash;
281 struct list_head *p;
282 struct hci_conn *c;
283
284 list_for_each(p, &h->list) {
285 c = list_entry(p, struct hci_conn, list);
286 if (c->type == type && !bacmp(&c->dst, ba))
287 return c;
288 }
289 return NULL;
290 }
291
292 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
293 void hci_add_sco(struct hci_conn *conn, __u16 handle);
294
295 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
296 int hci_conn_del(struct hci_conn *conn);
297 void hci_conn_hash_flush(struct hci_dev *hdev);
298
299 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
300 int hci_conn_auth(struct hci_conn *conn);
301 int hci_conn_encrypt(struct hci_conn *conn);
302 int hci_conn_change_link_key(struct hci_conn *conn);
303 int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
304
305 void hci_conn_enter_active_mode(struct hci_conn *conn);
306 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
307
/* Take a reference on @conn and cancel any pending delayed
 * disconnect armed by hci_conn_put(). */
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}
313
314 static inline void hci_conn_put(struct hci_conn *conn)
315 {
316 if (atomic_dec_and_test(&conn->refcnt)) {
317 unsigned long timeo;
318 if (conn->type == ACL_LINK) {
319 timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
320 if (!conn->out)
321 timeo *= 2;
322 del_timer(&conn->idle_timer);
323 } else
324 timeo = msecs_to_jiffies(10);
325 mod_timer(&conn->disc_timer, jiffies + timeo);
326 }
327 }
328
/* ----- HCI tasks ----- */
/* Kick the command-queue tasklet. */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}

/* Kick the receive tasklet (see hci_recv_frame()). */
static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}

/* Kick the transmit tasklet. */
static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}
344
/* ----- HCI Devices ----- */
/* Drop a device reference without releasing the module reference;
 * the driver's destruct callback runs on the last put. */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

/* Drop both the device reference and the owning module reference
 * taken by hci_dev_hold(). */
static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

/* Take a device reference without pinning the owning module. */
static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}
363
364 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
365 {
366 if (try_module_get(d->owner))
367 return __hci_dev_hold(d);
368 return NULL;
369 }
370
/* The macro argument is parenthesized so any expression (not just a
 * simple identifier) can be passed safely. */
#define hci_dev_lock(d)		spin_lock(&(d)->lock)
#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
375
376 struct hci_dev *hci_dev_get(int index);
377 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
378
379 struct hci_dev *hci_alloc_dev(void);
380 void hci_free_dev(struct hci_dev *hdev);
381 int hci_register_dev(struct hci_dev *hdev);
382 int hci_unregister_dev(struct hci_dev *hdev);
383 int hci_suspend_dev(struct hci_dev *hdev);
384 int hci_resume_dev(struct hci_dev *hdev);
385 int hci_dev_open(__u16 dev);
386 int hci_dev_close(__u16 dev);
387 int hci_dev_reset(__u16 dev);
388 int hci_dev_reset_stat(__u16 dev);
389 int hci_dev_cmd(unsigned int cmd, void __user *arg);
390 int hci_get_dev_list(void __user *arg);
391 int hci_get_dev_info(void __user *arg);
392 int hci_get_conn_list(void __user *arg);
393 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
394 int hci_inquiry(void __user *arg);
395
396 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
397
/* Receive frame from HCI drivers.
 *
 * Takes ownership of @skb (skb->dev must point at the hci_dev).
 * Frames are dropped with -ENXIO unless the device is up or still
 * initializing; otherwise the frame is queued for the rx tasklet and
 * 0 is returned. */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
419
420 int hci_register_sysfs(struct hci_dev *hdev);
421 void hci_unregister_sysfs(struct hci_dev *hdev);
422 void hci_conn_add_sysfs(struct hci_conn *conn);
423 void hci_conn_del_sysfs(struct hci_conn *conn);
424
425 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
426
/* ----- LMP capabilities ----- */
/* Test feature bits of hdev->features[] as reported by the controller. */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
432
/* ----- HCI protocols ----- */
/* Operations table for an upper protocol (L2CAP or SCO), registered
 * with hci_register_proto() into the hci_proto[] slots. All callbacks
 * are optional; each dispatch helper below checks before calling. */
struct hci_proto {
	char		*name;
	unsigned int	id;		/* HCI_PROTO_L2CAP or HCI_PROTO_SCO */
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*auth_cfm)		(struct hci_conn *conn, __u8 status);
	int (*encrypt_cfm)	(struct hci_conn *conn, __u8 status);
};
449
450 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
451 {
452 register struct hci_proto *hp;
453 int mask = 0;
454
455 hp = hci_proto[HCI_PROTO_L2CAP];
456 if (hp && hp->connect_ind)
457 mask |= hp->connect_ind(hdev, bdaddr, type);
458
459 hp = hci_proto[HCI_PROTO_SCO];
460 if (hp && hp->connect_ind)
461 mask |= hp->connect_ind(hdev, bdaddr, type);
462
463 return mask;
464 }
465
466 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
467 {
468 register struct hci_proto *hp;
469
470 hp = hci_proto[HCI_PROTO_L2CAP];
471 if (hp && hp->connect_cfm)
472 hp->connect_cfm(conn, status);
473
474 hp = hci_proto[HCI_PROTO_SCO];
475 if (hp && hp->connect_cfm)
476 hp->connect_cfm(conn, status);
477 }
478
479 static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
480 {
481 register struct hci_proto *hp;
482
483 hp = hci_proto[HCI_PROTO_L2CAP];
484 if (hp && hp->disconn_ind)
485 hp->disconn_ind(conn, reason);
486
487 hp = hci_proto[HCI_PROTO_SCO];
488 if (hp && hp->disconn_ind)
489 hp->disconn_ind(conn, reason);
490 }
491
492 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
493 {
494 register struct hci_proto *hp;
495
496 hp = hci_proto[HCI_PROTO_L2CAP];
497 if (hp && hp->auth_cfm)
498 hp->auth_cfm(conn, status);
499
500 hp = hci_proto[HCI_PROTO_SCO];
501 if (hp && hp->auth_cfm)
502 hp->auth_cfm(conn, status);
503 }
504
505 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
506 {
507 register struct hci_proto *hp;
508
509 hp = hci_proto[HCI_PROTO_L2CAP];
510 if (hp && hp->encrypt_cfm)
511 hp->encrypt_cfm(conn, status);
512
513 hp = hci_proto[HCI_PROTO_SCO];
514 if (hp && hp->encrypt_cfm)
515 hp->encrypt_cfm(conn, status);
516 }
517
518 int hci_register_proto(struct hci_proto *hproto);
519 int hci_unregister_proto(struct hci_proto *hproto);
520
/* ----- HCI callbacks ----- */
/* Callback module registered with hci_register_cb(); entries sit on
 * hci_cb_list (guarded by hci_cb_list_lock) and every callback is
 * optional. */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*auth_cfm)	(struct hci_conn *conn, __u8 status);
	void (*encrypt_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};
532
533 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
534 {
535 struct list_head *p;
536
537 hci_proto_auth_cfm(conn, status);
538
539 read_lock_bh(&hci_cb_list_lock);
540 list_for_each(p, &hci_cb_list) {
541 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
542 if (cb->auth_cfm)
543 cb->auth_cfm(conn, status);
544 }
545 read_unlock_bh(&hci_cb_list_lock);
546 }
547
548 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
549 {
550 struct list_head *p;
551
552 hci_proto_encrypt_cfm(conn, status);
553
554 read_lock_bh(&hci_cb_list_lock);
555 list_for_each(p, &hci_cb_list) {
556 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
557 if (cb->encrypt_cfm)
558 cb->encrypt_cfm(conn, status, encrypt);
559 }
560 read_unlock_bh(&hci_cb_list_lock);
561 }
562
563 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
564 {
565 struct list_head *p;
566
567 read_lock_bh(&hci_cb_list_lock);
568 list_for_each(p, &hci_cb_list) {
569 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
570 if (cb->key_change_cfm)
571 cb->key_change_cfm(conn, status);
572 }
573 read_unlock_bh(&hci_cb_list_lock);
574 }
575
576 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
577 {
578 struct list_head *p;
579
580 read_lock_bh(&hci_cb_list_lock);
581 list_for_each(p, &hci_cb_list) {
582 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
583 if (cb->role_switch_cfm)
584 cb->role_switch_cfm(conn, status, role);
585 }
586 read_unlock_bh(&hci_cb_list_lock);
587 }
588
589 int hci_register_cb(struct hci_cb *hcb);
590 int hci_unregister_cb(struct hci_cb *hcb);
591
592 int hci_register_notifier(struct notifier_block *nb);
593 int hci_unregister_notifier(struct notifier_block *nb);
594
595 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
596 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
597 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
598
599 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);
600
601 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
602
603 /* ----- HCI Sockets ----- */
604 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
605
606 /* HCI info for socket */
607 #define hci_pi(sk) ((struct hci_pinfo *) sk)
608
609 struct hci_pinfo {
610 struct bt_sock bt;
611 struct hci_dev *hdev;
612 struct hci_filter filter;
613 __u32 cmsg_mask;
614 };
615
/* HCI security filter: bitmasks restricting which packet types,
 * events, and opcodes (per OGF, 4 words of OCF bits each) an
 * unprivileged socket may use. */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;			/* allowed packet types */
	__u32 event_mask[2];			/* allowed HCI events */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed commands per OGF */
};
624
/* ----- HCI requests ----- */
/* States of a synchronous HCI request (hdev->req_status). */
#define HCI_REQ_DONE		0
#define HCI_REQ_PEND		1
#define HCI_REQ_CANCELED	2

/* The macro argument is parenthesized so any expression (not just a
 * simple identifier) can be passed safely. */
#define hci_req_lock(d)		down(&(d)->req_lock)
#define hci_req_unlock(d)	up(&(d)->req_lock)
632
633 void hci_req_complete(struct hci_dev *hdev, int result);
634
635 #endif /* __HCI_CORE_H */