/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <linux/interrupt.h>
#include <net/bluetooth/hci.h>

/* HCI priority */
#define HCI_PRIO_MAX	7

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct inquiry_cache {
	__u32			timestamp;
	struct inquiry_entry	*list;
};

struct hci_conn_hash {
	struct list_head list;
	unsigned int	acl_num;
	unsigned int	sco_num;
	unsigned int	le_num;
};

struct bdaddr_list {
	struct list_head list;
	bdaddr_t bdaddr;
};

struct bt_uuid {
	struct list_head list;
	u8 uuid[16];
	u8 svc_hint;
};

struct key_master_id {
	__le16 ediv;
	u8 rand[8];
} __packed;

struct link_key_data {
	bdaddr_t bdaddr;
	u8 type;
	u8 val[16];
	u8 pin_len;
	u8 dlen;
	u8 data[0];
} __packed;

struct link_key {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 type;
	u8 val[16];
	u8 pin_len;
	u8 dlen;
	u8 data[0];
};

struct oob_data {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 hash[16];
	u8 randomizer[16];
};

struct adv_entry {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
};

#define NUM_REASSEMBLY 4
struct hci_dev {
	struct list_head list;
	struct mutex	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		bus;
	__u8		dev_type;
	bdaddr_t	bdaddr;
	__u8		dev_name[HCI_MAX_NAME_LENGTH];
	__u8		eir[HCI_MAX_EIR_LENGTH];
	__u8		dev_class[3];
	__u8		major_class;
	__u8		minor_class;
	__u8		features[8];
	__u8		host_features[8];
	__u8		commands[64];
	__u8		ssp_mode;
	__u8		hci_ver;
	__u16		hci_rev;
	__u8		lmp_ver;
	__u16		manufacturer;
	__le16		lmp_subver;
	__u16		voice_setting;
	__u8		io_capability;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	__u8		amp_status;
	__u32		amp_total_bw;
	__u32		amp_max_bw;
	__u32		amp_min_latency;
	__u32		amp_max_pdu;
	__u8		amp_type;
	__u16		amp_pal_cap;
	__u16		amp_assoc_size;
	__u32		amp_max_flush_to;
	__u32		amp_be_flush_to;

	__u8		flow_ctl_mode;

	unsigned int	auto_accept_delay;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;
	unsigned int	le_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	le_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;
	unsigned int	le_pkts;

	__u16		block_len;
	__u16		block_mtu;
	__u16		num_blocks;
	__u16		block_cnt;

	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;
	unsigned long	le_last_tx;

	struct workqueue_struct	*workqueue;

	struct work_struct	power_on;
	struct delayed_work	power_off;

	__u16			discov_timeout;
	struct delayed_work	discov_off;

	struct delayed_work	service_cache;

	struct timer_list	cmd_timer;

	struct work_struct	rx_work;
	struct work_struct	cmd_work;
	struct work_struct	tx_work;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[NUM_REASSEMBLY];

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;

	__u16			init_last_cmd;

	struct list_head	mgmt_pending;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;
	struct list_head	blacklist;

	struct list_head	uuids;

	struct list_head	link_keys;

	struct list_head	remote_oob_data;

	struct list_head	adv_entries;
	struct delayed_work	adv_work;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t		promisc;

	struct dentry		*debugfs;

	struct device		*parent;
	struct device		dev;

	struct rfkill		*rfkill;

	struct module		*owner;

	unsigned long		dev_flags;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
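
/*
 * Usage note (illustrative sketch, not part of this header's contract):
 * a transport driver fills in the open/close/flush/send callbacks above
 * and registers the device with hci_register_dev(), declared later in
 * this file.  The my_open/my_close/my_flush/my_send helpers below are
 * hypothetical:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->owner = THIS_MODULE;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */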

struct hci_conn {
	struct list_head list;

	atomic_t	refcnt;

	bdaddr_t	dst;
	__u8		dst_type;
	__u16		handle;
	__u16		state;
	__u8		mode;
	__u8		type;
	__u8		out;
	__u8		attempt;
	__u8		dev_class[3];
	__u8		features[8];
	__u8		ssp_mode;
	__u16		interval;
	__u16		pkt_type;
	__u16		link_policy;
	__u32		link_mode;
	__u8		key_type;
	__u8		auth_type;
	__u8		sec_level;
	__u8		pending_sec_level;
	__u8		pin_length;
	__u8		enc_key_size;
	__u8		io_capability;
	__u8		power_save;
	__u16		disc_timeout;
	unsigned long	pend;

	__u8		remote_cap;
	__u8		remote_oob;
	__u8		remote_auth;

	unsigned int	sent;

	struct sk_buff_head data_q;
	struct list_head chan_list;

	struct delayed_work disc_work;
	struct timer_list idle_timer;
	struct timer_list auto_accept_timer;

	struct device	dev;
	atomic_t	devref;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*smp_conn;

	struct hci_conn	*link;

	void (*connect_cfm_cb)	(struct hci_conn *conn, u8 status);
	void (*security_cfm_cb)	(struct hci_conn *conn, u8 status);
	void (*disconn_cfm_cb)	(struct hci_conn *conn, u8 reason);
};

struct hci_chan {
	struct list_head list;

	struct hci_conn	*conn;
	struct sk_buff_head data_q;
	unsigned int	sent;
};

extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- HCI interface to upper protocols ----- */
extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
extern int l2cap_disconn_ind(struct hci_conn *hcon);
extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);

extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)	/* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)	/* 60 seconds */

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return c->list == NULL;
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}
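
/*
 * The *_AGE_MAX constants above pair with these helpers: a cached result is
 * treated as stale once its age (in jiffies) exceeds the limit.  A typical
 * freshness check looks roughly like the sketch below, where
 * start_new_inquiry() is a hypothetical caller-side helper:
 *
 *	if (inquiry_cache_empty(hdev) ||
 *	    inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX)
 *		start_new_inquiry(hdev);
 */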

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
							bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_REAUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
	HCI_CONN_SCO_SETUP_PEND,
	HCI_CONN_LE_SMP_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	h->acl_num = 0;
	h->sco_num = 0;
	h->le_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add_rcu(&c->list, &h->list);
	switch (c->type) {
	case ACL_LINK:
		h->acl_num++;
		break;
	case LE_LINK:
		h->le_num++;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num++;
		break;
	}
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;

	list_del_rcu(&c->list);
	synchronize_rcu();

	switch (c->type) {
	case ACL_LINK:
		h->acl_num--;
		break;
	case LE_LINK:
		h->le_num--;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num--;
		break;
	}
}

static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	switch (type) {
	case ACL_LINK:
		return h->acl_num;
	case LE_LINK:
		return h->le_num;
	case SCO_LINK:
	case ESCO_LINK:
		return h->sco_num;
	default:
		return 0;
	}
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
								__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->handle == handle) {
			rcu_read_unlock();
			return c;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
							__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && !bacmp(&c->dst, ba)) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
							__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->state == state) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}
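
/*
 * Note on the lookup helpers above: they walk the connection list under
 * rcu_read_lock() but return the hci_conn pointer without taking a
 * reference, so the caller is expected to serialise against removal (in
 * practice by holding hdev->lock via hci_dev_lock()).  Sketch:
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &bdaddr);
 *	if (conn)
 *		hci_conn_hold(conn);
 *	hci_dev_unlock(hdev);
 */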

void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_chan *hci_chan_create(struct hci_conn *conn);
int hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
						__u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);

void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);

static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	cancel_delayed_work_sync(&conn->disc_work);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK || conn->type == LE_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;
			} else {
				timeo = msecs_to_jiffies(10);
			}
		} else {
			timeo = msecs_to_jiffies(10);
		}
		cancel_delayed_work_sync(&conn->disc_work);
		/* queue_delayed_work() takes a relative delay in jiffies */
		queue_delayed_work(conn->hdev->workqueue,
					&conn->disc_work, timeo);
	}
}
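
/*
 * Reference counting sketch: hci_conn_hold() bumps the refcount and cancels
 * any pending deferred disconnect; hci_conn_put() drops it and, on the last
 * reference, schedules disc_work so the link is torn down after
 * disc_timeout ms (doubled for incoming ACL/LE links, see above).  Typical
 * pairing in a caller:
 *
 *	hci_conn_hold(conn);
 *	... use the connection ...
 *	hci_conn_put(conn);
 */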

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

/*
 * hci_dev_put and hci_dev_hold are macros to avoid dragging all the
 * overhead of all the modular infrastructure into this header.
 */
#define hci_dev_put(d)		\
do {				\
	__hci_dev_put(d);	\
	module_put(d->owner);	\
} while (0)

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

#define hci_dev_hold(d)						\
({								\
	try_module_get(d->owner) ? __hci_dev_hold(d) : NULL;	\
})

#define hci_dev_lock(d)		mutex_lock(&d->lock)
#define hci_dev_unlock(d)	mutex_unlock(&d->lock)
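
/*
 * Typical pattern (sketch): hci_dev_get(), declared below, is expected to
 * return the device with a reference already held, so callers pair it with
 * hci_dev_put() and take hdev->lock around any state they touch:
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	hci_dev_lock(hdev);
 *	... inspect or modify hdev state ...
 *	hci_dev_unlock(hdev);
 *	hci_dev_put(hdev);
 */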

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr);

int hci_uuids_clear(struct hci_dev *hdev);

int hci_link_keys_clear(struct hci_dev *hdev);
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
			bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type);
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16]);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);

int hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
						bdaddr_t *bdaddr);
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer);
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);

#define ADV_CLEAR_TIMEOUT (3*60*HZ) /* Three minutes */
int hci_adv_entries_clear(struct hci_dev *hdev);
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_add_adv_entry(struct hci_dev *hdev,
				struct hci_ev_le_advertising_info *ev);

void hci_del_off_timer(struct hci_dev *hdev);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);

void hci_init_sysfs(struct hci_dev *hdev);
int hci_add_sysfs(struct hci_dev *hdev);
void hci_del_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev)  ((dev)->features[6] & LMP_NO_FLUSH)
#define lmp_le_capable(dev)        ((dev)->features[4] & LMP_LE)

/* ----- Extended LMP capabilities ----- */
#define lmp_host_le_capable(dev) ((dev)->host_features[0] & LMP_HOST_LE)
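
/*
 * The lmp_*_capable() macros test bits in the controller feature pages read
 * during initialization; lmp_host_le_capable() additionally reflects whether
 * LE support has been enabled on the host side.  A common check before
 * starting an LE operation is therefore (sketch):
 *
 *	if (lmp_le_capable(hdev) && lmp_host_le_capable(hdev))
 *		... LE is usable on this controller ...
 */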

/* ----- HCI protocols ----- */
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
								__u8 type)
{
	switch (type) {
	case ACL_LINK:
		return l2cap_connect_ind(hdev, bdaddr);

	case SCO_LINK:
	case ESCO_LINK:
		return sco_connect_ind(hdev, bdaddr);

	default:
		BT_ERR("unknown link type %d", type);
		return -EINVAL;
	}
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	switch (conn->type) {
	case ACL_LINK:
	case LE_LINK:
		l2cap_connect_cfm(conn, status);
		break;

	case SCO_LINK:
	case ESCO_LINK:
		sco_connect_cfm(conn, status);
		break;

	default:
		BT_ERR("unknown link type %d", conn->type);
		break;
	}

	if (conn->connect_cfm_cb)
		conn->connect_cfm_cb(conn, status);
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return HCI_ERROR_REMOTE_USER_TERM;

	return l2cap_disconn_ind(conn);
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	switch (conn->type) {
	case ACL_LINK:
	case LE_LINK:
		l2cap_disconn_cfm(conn, reason);
		break;

	case SCO_LINK:
	case ESCO_LINK:
		sco_disconn_cfm(conn, reason);
		break;

	default:
		BT_ERR("unknown link type %d", conn->type);
		break;
	}

	if (conn->disconn_cfm_cb)
		conn->disconn_cfm_cb(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	__u8 encrypt;

	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
	l2cap_security_cfm(conn, status, encrypt);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
								__u8 encrypt)
{
	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return;

	l2cap_security_cfm(conn, status, encrypt);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status,
								__u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
								__u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	if (conn->pending_sec_level > conn->sec_level)
		conn->sec_level = conn->pending_sec_level;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
								__u8 role)
{
	struct list_head *p;

	read_lock(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
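
/*
 * A protocol that wants these notifications fills in a struct hci_cb and
 * registers it; NULL callbacks are simply skipped by the hci_*_cfm()
 * helpers above.  Minimal sketch, where my_security_cfm is a hypothetical
 * handler:
 *
 *	static struct hci_cb my_cb = {
 *		.name         = "my_proto",
 *		.security_cfm = my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */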

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
							struct sock *skip_sk);

/* Management interface */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(struct hci_dev *hdev);
int mgmt_index_removed(struct hci_dev *hdev);
int mgmt_powered(struct hci_dev *hdev, u8 powered);
int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
							u8 persistent);
int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
							u8 addr_type);
int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
							u8 addr_type);
int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
						u8 addr_type, u8 status);
int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
							u8 status);
int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
							u8 status);
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
						__le32 value, u8 confirm_hint);
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
							u8 status);
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
						bdaddr_t *bdaddr, u8 status);
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr);
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
							u8 status);
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
						bdaddr_t *bdaddr, u8 status);
int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
						u8 *randomizer, u8 status);
int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir);
int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name);
int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* HCI socket flags */
#define HCI_PI_MGMT_INIT 0

struct hci_pinfo {
	struct bt_sock		bt;
	struct hci_dev		*hdev;
	struct hci_filter	filter;
	__u32			cmsg_mask;
	unsigned short		channel;
	unsigned long		flags;
};
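
/*
 * hci_pi() simply casts a struct sock back to its containing hci_pinfo, so
 * socket-layer code can reach per-socket HCI state, e.g. (sketch):
 *
 *	struct hci_dev *hdev = hci_pi(sk)->hdev;
 *	u32 mask = hci_pi(sk)->cmsg_mask;
 */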

/* HCI security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);

void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
					u16 latency, u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
							__u8 ltk[16]);
void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
void hci_le_ltk_neg_reply(struct hci_conn *conn);

int hci_do_inquiry(struct hci_dev *hdev, u8 length);
int hci_cancel_inquiry(struct hci_dev *hdev);

#endif /* __HCI_CORE_H */