/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI priority */
#define HCI_PRIO_MAX	7

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t bdaddr;
	__u8 pscan_rep_mode;
	__u8 pscan_period_mode;
	__u8 pscan_mode;
	__u8 dev_class[3];
	__le16 clock_offset;
	__s8 rssi;
	__u8 ssp_mode;
};

struct inquiry_entry {
	struct list_head all;	/* inq_cache.all */
	struct list_head list;	/* unknown or resolve */
	enum {
		NAME_NOT_KNOWN,
		NAME_NEEDED,
		NAME_PENDING,
		NAME_KNOWN,
	} name_state;
	__u32 timestamp;
	struct inquiry_data data;
};

struct discovery_state {
	int type;
	enum {
		DISCOVERY_STOPPED,
		DISCOVERY_STARTING,
		DISCOVERY_FINDING,
		DISCOVERY_RESOLVING,
		DISCOVERY_STOPPING,
	} state;
	struct list_head all;		/* All devices found during inquiry */
	struct list_head unknown;	/* Name state not known */
	struct list_head resolve;	/* Name needs to be resolved */
	__u32 timestamp;
};

struct hci_conn_hash {
	struct list_head list;
	unsigned int acl_num;
	unsigned int amp_num;
	unsigned int sco_num;
	unsigned int le_num;
};

struct bdaddr_list {
	struct list_head list;
	bdaddr_t bdaddr;
};

struct bt_uuid {
	struct list_head list;
	u8 uuid[16];
	u8 size;
	u8 svc_hint;
};

struct smp_ltk {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 authenticated;
	u8 type;
	u8 enc_size;
	__le16 ediv;
	u8 rand[8];
	u8 val[16];
} __packed;

struct link_key {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 type;
	u8 val[HCI_LINK_KEY_SIZE];
	u8 pin_len;
};

struct oob_data {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 hash[16];
	u8 randomizer[16];
};

struct le_scan_params {
	u8 type;
	u16 interval;
	u16 window;
	int timeout;
};

#define HCI_MAX_SHORT_NAME_LENGTH	10

struct amp_assoc {
	__u16 len;
	__u16 offset;
	__u16 rem_len;
	__u16 len_so_far;
	__u8 data[HCI_MAX_AMP_ASSOC_SIZE];
};

#define NUM_REASSEMBLY 4
struct hci_dev {
	struct list_head list;
	struct mutex lock;

	char name[8];
	unsigned long flags;
	__u16 id;
	__u8 bus;
	__u8 dev_type;
	bdaddr_t bdaddr;
	__u8 dev_name[HCI_MAX_NAME_LENGTH];
	__u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
	__u8 eir[HCI_MAX_EIR_LENGTH];
	__u8 dev_class[3];
	__u8 major_class;
	__u8 minor_class;
	__u8 features[8];
	__u8 host_features[8];
	__u8 le_features[8];
	__u8 le_white_list_size;
	__u8 le_states[8];
	__u8 commands[64];
	__u8 hci_ver;
	__u16 hci_rev;
	__u8 lmp_ver;
	__u16 manufacturer;
	__u16 lmp_subver;
	__u16 voice_setting;
	__u8 io_capability;
	__s8 inq_tx_power;
	__u16 devid_source;
	__u16 devid_vendor;
	__u16 devid_product;
	__u16 devid_version;

	__u16 pkt_type;
	__u16 esco_type;
	__u16 link_policy;
	__u16 link_mode;

	__u32 idle_timeout;
	__u16 sniff_min_interval;
	__u16 sniff_max_interval;

	__u8 amp_status;
	__u32 amp_total_bw;
	__u32 amp_max_bw;
	__u32 amp_min_latency;
	__u32 amp_max_pdu;
	__u8 amp_type;
	__u16 amp_pal_cap;
	__u16 amp_assoc_size;
	__u32 amp_max_flush_to;
	__u32 amp_be_flush_to;

	struct amp_assoc loc_assoc;

	__u8 flow_ctl_mode;

	unsigned int auto_accept_delay;

	unsigned long quirks;

	atomic_t cmd_cnt;
	unsigned int acl_cnt;
	unsigned int sco_cnt;
	unsigned int le_cnt;

	unsigned int acl_mtu;
	unsigned int sco_mtu;
	unsigned int le_mtu;
	unsigned int acl_pkts;
	unsigned int sco_pkts;
	unsigned int le_pkts;

	__u16 block_len;
	__u16 block_mtu;
	__u16 num_blocks;
	__u16 block_cnt;

	unsigned long acl_last_tx;
	unsigned long sco_last_tx;
	unsigned long le_last_tx;

	struct workqueue_struct *workqueue;
	struct workqueue_struct *req_workqueue;

	struct work_struct power_on;
	struct delayed_work power_off;

	__u16 discov_timeout;
	struct delayed_work discov_off;

	struct delayed_work service_cache;

	struct timer_list cmd_timer;

	struct work_struct rx_work;
	struct work_struct cmd_work;
	struct work_struct tx_work;

	struct sk_buff_head rx_q;
	struct sk_buff_head raw_q;
	struct sk_buff_head cmd_q;

	struct sk_buff *sent_cmd;
	struct sk_buff *reassembly[NUM_REASSEMBLY];

	struct mutex req_lock;
	wait_queue_head_t req_wait_q;
	__u32 req_status;
	__u32 req_result;

	__u16 init_last_cmd;

	struct list_head mgmt_pending;

	struct discovery_state discovery;
	struct hci_conn_hash conn_hash;
	struct list_head blacklist;

	struct list_head uuids;

	struct list_head link_keys;

	struct list_head long_term_keys;

	struct list_head remote_oob_data;

	struct hci_dev_stats stat;

	struct sk_buff_head driver_init;

	atomic_t promisc;

	struct dentry *debugfs;

	struct device dev;

	struct rfkill *rfkill;

	unsigned long dev_flags;

	struct delayed_work le_scan_disable;

	struct work_struct le_scan;
	struct le_scan_params le_scan_params;

	__s8 adv_tx_power;
	__u8 adv_data[HCI_MAX_AD_LENGTH];
	__u8 adv_data_len;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
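
/*
 * Illustrative sketch only (not part of the upstream header): a
 * transport driver typically allocates an hci_dev, fills in the
 * mandatory callbacks declared above and then registers the device.
 * The names my_open, my_close, my_flush, my_send, my_priv and intf
 * are hypothetical placeholders.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_priv);
 *	SET_HCIDEV_DEV(hdev, &intf->dev);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */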

#define HCI_PHY_HANDLE(handle)	(handle & 0xff)

struct hci_conn {
	struct list_head list;

	atomic_t refcnt;

	bdaddr_t dst;
	__u8 dst_type;
	__u16 handle;
	__u16 state;
	__u8 mode;
	__u8 type;
	bool out;
	__u8 attempt;
	__u8 dev_class[3];
	__u8 features[8];
	__u16 interval;
	__u16 pkt_type;
	__u16 link_policy;
	__u32 link_mode;
	__u8 key_type;
	__u8 auth_type;
	__u8 sec_level;
	__u8 pending_sec_level;
	__u8 pin_length;
	__u8 enc_key_size;
	__u8 io_capability;
	__u32 passkey_notify;
	__u8 passkey_entered;
	__u16 disc_timeout;
	unsigned long flags;

	__u8 remote_cap;
	__u8 remote_auth;
	__u8 remote_id;
	bool flush_key;

	unsigned int sent;

	struct sk_buff_head data_q;
	struct list_head chan_list;

	struct delayed_work disc_work;
	struct timer_list idle_timer;
	struct timer_list auto_accept_timer;

	struct device dev;
	atomic_t devref;

	struct hci_dev *hdev;
	void *l2cap_data;
	void *sco_data;
	void *smp_conn;
	struct amp_mgr *amp_mgr;

	struct hci_conn *link;

	void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
	void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
	void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
};

struct hci_chan {
	struct list_head list;
	__u16 handle;
	struct hci_conn *conn;
	struct sk_buff_head data_q;
	unsigned int sent;
	__u8 state;
};

extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- HCI interface to upper protocols ----- */
extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
extern int l2cap_disconn_ind(struct hci_conn *hcon);
extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
			      u16 flags);

extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)	/* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)	/* 60 seconds */

static inline void discovery_init(struct hci_dev *hdev)
{
	hdev->discovery.state = DISCOVERY_STOPPED;
	INIT_LIST_HEAD(&hdev->discovery.all);
	INIT_LIST_HEAD(&hdev->discovery.unknown);
	INIT_LIST_HEAD(&hdev->discovery.resolve);
}

bool hci_discovery_active(struct hci_dev *hdev);

void hci_discovery_set_state(struct hci_dev *hdev, int state);

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	return list_empty(&hdev->discovery.all);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct discovery_state *c = &hdev->discovery;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr);
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr);
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state);
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie);
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp);

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_REAUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
	HCI_CONN_SCO_SETUP_PEND,
	HCI_CONN_LE_SMP_PEND,
	HCI_CONN_MGMT_CONNECTED,
	HCI_CONN_SSP_ENABLED,
	HCI_CONN_POWER_SAVE,
	HCI_CONN_REMOTE_OOB,
};
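
/*
 * Note (sketch, not from the upstream header): the values above are bit
 * numbers rather than masks; core code operates on them with the atomic
 * bit helpers against hci_conn->flags, for example:
 *
 *	set_bit(HCI_CONN_AUTH_PEND, &conn->flags);
 *	pending = test_and_clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
 */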

static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	       test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add_rcu(&c->list, &h->list);
	switch (c->type) {
	case ACL_LINK:
		h->acl_num++;
		break;
	case AMP_LINK:
		h->amp_num++;
		break;
	case LE_LINK:
		h->le_num++;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num++;
		break;
	}
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;

	list_del_rcu(&c->list);
	synchronize_rcu();

	switch (c->type) {
	case ACL_LINK:
		h->acl_num--;
		break;
	case AMP_LINK:
		h->amp_num--;
		break;
	case LE_LINK:
		h->le_num--;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num--;
		break;
	}
}

static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	switch (type) {
	case ACL_LINK:
		return h->acl_num;
	case AMP_LINK:
		return h->amp_num;
	case LE_LINK:
		return h->le_num;
	case SCO_LINK:
	case ESCO_LINK:
		return h->sco_num;
	default:
		return 0;
	}
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
							   __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->handle == handle) {
			rcu_read_unlock();
			return c;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
						       __u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && !bacmp(&c->dst, ba)) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
							  __u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->state == state) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}
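
/*
 * Usage sketch (assumption, not from the upstream header): callers
 * normally hold hdev->lock around a lookup so the connection they get
 * back cannot disappear while it is still in use.  ev is a hypothetical
 * decoded HCI event.
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
 *	if (conn)
 *		hci_conn_hold(conn);
 *	hci_dev_unlock(hdev);
 */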

void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
void hci_conn_accept(struct hci_conn *conn, int mask);

struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
			     __u8 dst_type, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);

void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);

static inline void hci_conn_hold(struct hci_conn *conn)
{
	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));

	atomic_inc(&conn->refcnt);
	cancel_delayed_work(&conn->disc_work);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));

	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;

		switch (conn->type) {
		case ACL_LINK:
		case LE_LINK:
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = conn->disc_timeout;
				if (!conn->out)
					timeo *= 2;
			} else {
				timeo = msecs_to_jiffies(10);
			}
			break;

		case AMP_LINK:
			timeo = conn->disc_timeout;
			break;

		default:
			timeo = msecs_to_jiffies(10);
			break;
		}

		cancel_delayed_work(&conn->disc_work);
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->disc_work, timeo);
	}
}
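
/*
 * Sketch (assumption): hci_conn_hold() and hci_conn_put() must be
 * balanced.  Dropping the last reference does not free the connection
 * right away; as shown above it re-arms disc_work, so the link is only
 * torn down once disc_timeout has expired.
 *
 *	hci_conn_hold(conn);
 *	use the connection here
 *	hci_conn_put(conn);
 */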

/* ----- HCI Devices ----- */
static inline void hci_dev_put(struct hci_dev *d)
{
	BT_DBG("%s orig refcnt %d", d->name,
	       atomic_read(&d->dev.kobj.kref.refcount));

	put_device(&d->dev);
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	BT_DBG("%s orig refcnt %d", d->name,
	       atomic_read(&d->dev.kobj.kref.refcount));

	get_device(&d->dev);
	return d;
}

#define hci_dev_lock(d)		mutex_lock(&d->lock)
#define hci_dev_unlock(d)	mutex_unlock(&d->lock)

#define to_hci_dev(d)	container_of(d, struct hci_dev, dev)
#define to_hci_conn(c)	container_of(c, struct hci_conn, dev)

static inline void *hci_get_drvdata(struct hci_dev *hdev)
{
	return dev_get_drvdata(&hdev->dev);
}

static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
{
	dev_set_drvdata(&hdev->dev, data);
}
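
/*
 * Sketch (assumption): a driver typically stores its private state with
 * hci_set_drvdata() at probe time and fetches it back inside the
 * hci_dev callbacks.  struct my_data and my_hw_power_on() are
 * hypothetical placeholders.
 *
 *	static int my_open(struct hci_dev *hdev)
 *	{
 *		struct my_data *data = hci_get_drvdata(hdev);
 *
 *		return my_hw_power_on(data);
 *	}
 */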

/* hci_dev_list shall be locked */
static inline uint8_t __hci_num_ctrl(void)
{
	uint8_t count = 0;
	struct list_head *p;

	list_for_each(p, &hci_dev_list) {
		count++;
	}

	return count;
}

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);

int hci_uuids_clear(struct hci_dev *hdev);

int hci_link_keys_clear(struct hci_dev *hdev);
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8]);
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type);
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_smp_ltks_clear(struct hci_dev *hdev);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);

int hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr);
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer);
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);

int hci_update_ad(struct hci_dev *hdev);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);

void hci_init_sysfs(struct hci_dev *hdev);
int hci_add_sysfs(struct hci_dev *hdev);
void hci_del_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev)	((hdev)->dev.parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_encrypt_capable(dev)	((dev)->features[0] & LMP_ENCRYPT)
#define lmp_rswitch_capable(dev)	((dev)->features[0] & LMP_RSWITCH)
#define lmp_hold_capable(dev)		((dev)->features[0] & LMP_HOLD)
#define lmp_sniff_capable(dev)		((dev)->features[0] & LMP_SNIFF)
#define lmp_park_capable(dev)		((dev)->features[1] & LMP_PARK)
#define lmp_inq_rssi_capable(dev)	((dev)->features[3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev)		((dev)->features[3] & LMP_ESCO)
#define lmp_bredr_capable(dev)		(!((dev)->features[4] & LMP_NO_BREDR))
#define lmp_le_capable(dev)		((dev)->features[4] & LMP_LE)
#define lmp_sniffsubr_capable(dev)	((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev)	((dev)->features[5] & LMP_PAUSE_ENC)
#define lmp_ext_inq_capable(dev)	((dev)->features[6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev)		!!((dev)->features[6] & LMP_SIMUL_LE_BR)
#define lmp_ssp_capable(dev)		((dev)->features[6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev)	((dev)->features[6] & LMP_NO_FLUSH)
#define lmp_lsto_capable(dev)		((dev)->features[7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev)	((dev)->features[7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev)	((dev)->features[7] & LMP_EXTFEATURES)

/* ----- Extended LMP capabilities ----- */
#define lmp_host_ssp_capable(dev)	((dev)->host_features[0] & LMP_HOST_SSP)
#define lmp_host_le_capable(dev)	!!((dev)->host_features[0] & LMP_HOST_LE)
#define lmp_host_le_br_capable(dev)	!!((dev)->host_features[0] & LMP_HOST_LE_BREDR)
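
/*
 * Usage sketch (assumption): each lmp_*_capable() macro takes a
 * struct hci_dev pointer and evaluates to a plain truth value, so they
 * are intended for tests such as:
 *
 *	bool dual_mode = lmp_bredr_capable(hdev) && lmp_le_capable(hdev);
 *	bool use_ssp = lmp_ssp_capable(hdev) && lmp_host_ssp_capable(hdev);
 */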

/* returns true if at least one AMP active */
static inline bool hci_amp_capable(void)
{
	struct hci_dev *hdev;
	bool ret = false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list)
		if (hdev->amp_type == HCI_AMP &&
		    test_bit(HCI_UP, &hdev->flags))
			ret = true;
	read_unlock(&hci_dev_list_lock);

	return ret;
}

/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
					__u8 type, __u8 *flags)
{
	switch (type) {
	case ACL_LINK:
		return l2cap_connect_ind(hdev, bdaddr);

	case SCO_LINK:
	case ESCO_LINK:
		return sco_connect_ind(hdev, bdaddr, flags);

	default:
		BT_ERR("unknown link type %d", type);
		return -EINVAL;
	}
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	switch (conn->type) {
	case ACL_LINK:
	case LE_LINK:
		l2cap_connect_cfm(conn, status);
		break;

	case SCO_LINK:
	case ESCO_LINK:
		sco_connect_cfm(conn, status);
		break;

	default:
		BT_ERR("unknown link type %d", conn->type);
		break;
	}

	if (conn->connect_cfm_cb)
		conn->connect_cfm_cb(conn, status);
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return HCI_ERROR_REMOTE_USER_TERM;

	return l2cap_disconn_ind(conn);
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	switch (conn->type) {
	case ACL_LINK:
	case LE_LINK:
		l2cap_disconn_cfm(conn, reason);
		break;

	case SCO_LINK:
	case ESCO_LINK:
		sco_disconn_cfm(conn, reason);
		break;

	/* L2CAP would be handled for BREDR chan */
	case AMP_LINK:
		break;

	default:
		BT_ERR("unknown link type %d", conn->type);
		break;
	}

	if (conn->disconn_cfm_cb)
		conn->disconn_cfm_cb(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	__u8 encrypt;

	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
	l2cap_security_cfm(conn, status, encrypt);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
					 __u8 encrypt)
{
	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return;

	l2cap_security_cfm(conn, status, encrypt);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm) (struct hci_conn *conn, __u8 status,
			      __u8 encrypt);
	void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
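
/*
 * Sketch (assumption): an upper layer that wants these notifications
 * fills in a static struct hci_cb and registers it with
 * hci_register_cb(); the callbacks are then run for every connection
 * until hci_unregister_cb() is called.  my_proto_cb and my_security_cfm
 * are hypothetical placeholders.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.security_cfm	= my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	hci_unregister_cb(&my_proto_cb);
 */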

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
				   __u8 encrypt)
{
	struct hci_cb *cb;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	if (conn->pending_sec_level > conn->sec_level)
		conn->sec_level = conn->pending_sec_level;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	read_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
				       __u8 role)
{
	struct hci_cb *cb;

	read_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock(&hci_cb_list_lock);
}

static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
{
	size_t parsed = 0;

	if (data_len < 2)
		return false;

	while (parsed < data_len - 1) {
		u8 field_len = data[0];

		if (field_len == 0)
			break;

		parsed += field_len + 1;

		if (parsed > data_len)
			break;

		if (data[1] == type)
			return true;

		data += field_len + 1;
	}

	return false;
}

static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}

static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}
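
/*
 * Sketch (assumption): eir_append_data() writes one length/type/value
 * field at offset eir_len and returns the new length, so an EIR buffer
 * is built by chaining calls.  The caller must make sure the fields
 * still fit into the buffer; name_len is a hypothetical placeholder.
 *
 *	u8 eir[HCI_MAX_EIR_LENGTH];
 *	u16 len = 0;
 *
 *	len = eir_append_data(eir, len, EIR_NAME_COMPLETE,
 *			      hdev->dev_name, name_len);
 *	len = eir_append_data(eir, len, EIR_TX_POWER,
 *			      (u8 *) &hdev->inq_tx_power, 1);
 */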

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);

void hci_sock_dev_event(struct hci_dev *hdev, int event);

/* Management interface */
#define DISCOV_TYPE_BREDR		(BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE			(BIT(BDADDR_LE_PUBLIC) | \
					 BIT(BDADDR_LE_RANDOM))
#define DISCOV_TYPE_INTERLEAVED		(BIT(BDADDR_BREDR) | \
					 BIT(BDADDR_LE_PUBLIC) | \
					 BIT(BDADDR_LE_RANDOM))

int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(struct hci_dev *hdev);
int mgmt_index_removed(struct hci_dev *hdev);
int mgmt_powered(struct hci_dev *hdev, u8 powered);
int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		      bool persistent);
int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
			  u8 *dev_class);
int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u8 reason);
int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 link_type, u8 addr_type, u8 status);
int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			u8 addr_type, u8 status);
int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				 u8 status);
int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 status);
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, __le32 value,
			      u8 confirm_hint);
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type);
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered);
int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		     u8 addr_type, u8 status);
int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				   u8 status);
int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
					    u8 *randomizer, u8 status);
int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		      u8 ssp, u8 *eir, u16 eir_len);
int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		     u8 addr_type, s8 rssi, u8 *name, u8 name_len);
int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
int mgmt_interleaved_discovery(struct hci_dev *hdev);
int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
bool mgmt_valid_hdev(struct hci_dev *hdev);
int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;
	struct hci_dev *hdev;
	struct hci_filter filter;
	__u32 cmsg_mask;
	unsigned short channel;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE		0
#define HCI_REQ_PEND		1
#define HCI_REQ_CANCELED	2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);

void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
			u16 latency, u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
		      __u8 ltk[16]);
int hci_do_inquiry(struct hci_dev *hdev, u8 length);
int hci_cancel_inquiry(struct hci_dev *hdev);
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout);
int hci_cancel_le_scan(struct hci_dev *hdev);

u8 bdaddr_to_le(u8 bdaddr_type);

#endif /* __HCI_CORE_H */