/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
28 #include <net/bluetooth/hci.h>
30 /* HCI upper protocols */
31 #define HCI_PROTO_L2CAP 0
32 #define HCI_PROTO_SCO 1
34 /* HCI Core structures */
38 __u8 pscan_period_mode
;
46 struct inquiry_entry
{
47 struct inquiry_entry
*next
;
49 struct inquiry_data data
;
52 struct inquiry_cache
{
55 struct inquiry_entry
*list
;
58 struct hci_conn_hash
{
59 struct list_head list
;
66 struct list_head list
;
71 struct list_head list
;
/* Number of entries in the hci_dev->reassembly[] sk_buff array. */
#define NUM_REASSEMBLY 4
77 struct list_head list
;
103 __u16 sniff_min_interval
;
104 __u16 sniff_max_interval
;
106 unsigned long quirks
;
109 unsigned int acl_cnt
;
110 unsigned int sco_cnt
;
112 unsigned int acl_mtu
;
113 unsigned int sco_mtu
;
114 unsigned int acl_pkts
;
115 unsigned int sco_pkts
;
117 unsigned long cmd_last_tx
;
118 unsigned long acl_last_tx
;
119 unsigned long sco_last_tx
;
121 struct workqueue_struct
*workqueue
;
123 struct work_struct power_on
;
124 struct work_struct power_off
;
125 struct timer_list off_timer
;
127 struct tasklet_struct cmd_task
;
128 struct tasklet_struct rx_task
;
129 struct tasklet_struct tx_task
;
131 struct sk_buff_head rx_q
;
132 struct sk_buff_head raw_q
;
133 struct sk_buff_head cmd_q
;
135 struct sk_buff
*sent_cmd
;
136 struct sk_buff
*reassembly
[NUM_REASSEMBLY
];
138 struct mutex req_lock
;
139 wait_queue_head_t req_wait_q
;
144 struct inquiry_cache inq_cache
;
145 struct hci_conn_hash conn_hash
;
146 struct list_head blacklist
;
148 struct list_head uuids
;
150 struct hci_dev_stats stat
;
152 struct sk_buff_head driver_init
;
159 struct dentry
*debugfs
;
161 struct device
*parent
;
164 struct rfkill
*rfkill
;
166 struct module
*owner
;
168 int (*open
)(struct hci_dev
*hdev
);
169 int (*close
)(struct hci_dev
*hdev
);
170 int (*flush
)(struct hci_dev
*hdev
);
171 int (*send
)(struct sk_buff
*skb
);
172 void (*destruct
)(struct hci_dev
*hdev
);
173 void (*notify
)(struct hci_dev
*hdev
, unsigned int evt
);
174 int (*ioctl
)(struct hci_dev
*hdev
, unsigned int cmd
, unsigned long arg
);
178 struct list_head list
;
199 __u8 pending_sec_level
;
210 struct sk_buff_head data_q
;
212 struct timer_list disc_timer
;
213 struct timer_list idle_timer
;
215 struct work_struct work_add
;
216 struct work_struct work_del
;
221 struct hci_dev
*hdev
;
226 struct hci_conn
*link
;
229 extern struct hci_proto
*hci_proto
[];
230 extern struct list_head hci_dev_list
;
231 extern struct list_head hci_cb_list
;
232 extern rwlock_t hci_dev_list_lock
;
233 extern rwlock_t hci_cb_list_lock
;
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)	/* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)	/* 60 seconds */

/* Macro arguments are parenthesized so callers may pass any expression. */
#define inquiry_cache_lock(c)		spin_lock(&(c)->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&(c)->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&(c)->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&(c)->lock)
/* Initialise the spinlock of hdev's embedded inquiry cache.
 * NOTE(review): this fragment is garbled by extraction - the function's
 * braces and at least one trailing statement (orig lines 245, 248-249)
 * are missing from view; confirm against the pristine header. */
244 static inline void inquiry_cache_init(struct hci_dev
*hdev
)
246 struct inquiry_cache
*c
= &hdev
->inq_cache
;
247 spin_lock_init(&c
->lock
);
251 static inline int inquiry_cache_empty(struct hci_dev
*hdev
)
253 struct inquiry_cache
*c
= &hdev
->inq_cache
;
254 return c
->list
== NULL
;
257 static inline long inquiry_cache_age(struct hci_dev
*hdev
)
259 struct inquiry_cache
*c
= &hdev
->inq_cache
;
260 return jiffies
- c
->timestamp
;
263 static inline long inquiry_entry_age(struct inquiry_entry
*e
)
265 return jiffies
- e
->timestamp
;
268 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
);
269 void hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
);
271 /* ----- HCI Connections ----- */
274 HCI_CONN_ENCRYPT_PEND
,
275 HCI_CONN_RSWITCH_PEND
,
276 HCI_CONN_MODE_CHANGE_PEND
,
277 HCI_CONN_SCO_SETUP_PEND
,
/* Initialise hdev's connection hash: empty list plus its spinlock.
 * NOTE(review): extraction dropped this function's braces and possibly
 * trailing counter initialisation (orig lines 281, 285-287) - verify
 * against the pristine header. */
280 static inline void hci_conn_hash_init(struct hci_dev
*hdev
)
282 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
283 INIT_LIST_HEAD(&h
->list
);
284 spin_lock_init(&h
->lock
);
/* Add connection c to hdev's connection hash list.
 * NOTE(review): the branch body after the ACL_LINK test (and any SCO
 * counterpart, orig lines 294-297) is missing from this extraction -
 * presumably per-type connection counters are updated there; confirm. */
289 static inline void hci_conn_hash_add(struct hci_dev
*hdev
, struct hci_conn
*c
)
291 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
292 list_add(&c
->list
, &h
->list
);
293 if (c
->type
== ACL_LINK
)
/* Remove connection c from hdev's connection hash.
 * NOTE(review): the list_del call and the branch bodies after the
 * ACL_LINK test (orig lines 302, 304-307) are missing from this
 * extraction - confirm against the pristine header. */
299 static inline void hci_conn_hash_del(struct hci_dev
*hdev
, struct hci_conn
*c
)
301 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
303 if (c
->type
== ACL_LINK
)
309 static inline struct hci_conn
*hci_conn_hash_lookup_handle(struct hci_dev
*hdev
,
312 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
316 list_for_each(p
, &h
->list
) {
317 c
= list_entry(p
, struct hci_conn
, list
);
318 if (c
->handle
== handle
)
324 static inline struct hci_conn
*hci_conn_hash_lookup_ba(struct hci_dev
*hdev
,
325 __u8 type
, bdaddr_t
*ba
)
327 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
331 list_for_each(p
, &h
->list
) {
332 c
= list_entry(p
, struct hci_conn
, list
);
333 if (c
->type
== type
&& !bacmp(&c
->dst
, ba
))
339 static inline struct hci_conn
*hci_conn_hash_lookup_state(struct hci_dev
*hdev
,
340 __u8 type
, __u16 state
)
342 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
346 list_for_each(p
, &h
->list
) {
347 c
= list_entry(p
, struct hci_conn
, list
);
348 if (c
->type
== type
&& c
->state
== state
)
354 void hci_acl_connect(struct hci_conn
*conn
);
355 void hci_acl_disconn(struct hci_conn
*conn
, __u8 reason
);
356 void hci_add_sco(struct hci_conn
*conn
, __u16 handle
);
357 void hci_setup_sync(struct hci_conn
*conn
, __u16 handle
);
358 void hci_sco_setup(struct hci_conn
*conn
, __u8 status
);
360 struct hci_conn
*hci_conn_add(struct hci_dev
*hdev
, int type
, bdaddr_t
*dst
);
361 int hci_conn_del(struct hci_conn
*conn
);
362 void hci_conn_hash_flush(struct hci_dev
*hdev
);
363 void hci_conn_check_pending(struct hci_dev
*hdev
);
365 struct hci_conn
*hci_connect(struct hci_dev
*hdev
, int type
, bdaddr_t
*dst
, __u8 sec_level
, __u8 auth_type
);
366 int hci_conn_check_link_mode(struct hci_conn
*conn
);
367 int hci_conn_security(struct hci_conn
*conn
, __u8 sec_level
, __u8 auth_type
);
368 int hci_conn_change_link_key(struct hci_conn
*conn
);
369 int hci_conn_switch_role(struct hci_conn
*conn
, __u8 role
);
371 void hci_conn_enter_active_mode(struct hci_conn
*conn
);
372 void hci_conn_enter_sniff_mode(struct hci_conn
*conn
);
374 void hci_conn_hold_device(struct hci_conn
*conn
);
375 void hci_conn_put_device(struct hci_conn
*conn
);
377 static inline void hci_conn_hold(struct hci_conn
*conn
)
379 atomic_inc(&conn
->refcnt
);
380 del_timer(&conn
->disc_timer
);
/* Drop a reference on conn; when the count reaches zero, arm the
 * disconnect timer (longer timeout for connected ACL links, ~10ms
 * otherwise) instead of tearing the link down immediately.
 * NOTE(review): extraction dropped several interior lines here (the
 * timeo declaration, an out-going-link adjustment and the else arms,
 * orig lines 386, 391-395, 398-400) - verify against the pristine header. */
383 static inline void hci_conn_put(struct hci_conn
*conn
)
385 if (atomic_dec_and_test(&conn
->refcnt
)) {
387 if (conn
->type
== ACL_LINK
) {
388 del_timer(&conn
->idle_timer
);
389 if (conn
->state
== BT_CONNECTED
) {
390 timeo
= msecs_to_jiffies(conn
->disc_timeout
);
394 timeo
= msecs_to_jiffies(10);
396 timeo
= msecs_to_jiffies(10);
397 mod_timer(&conn
->disc_timer
, jiffies
+ timeo
);
401 /* ----- HCI Devices ----- */
402 static inline void __hci_dev_put(struct hci_dev
*d
)
404 if (atomic_dec_and_test(&d
->refcnt
))
408 static inline void hci_dev_put(struct hci_dev
*d
)
411 module_put(d
->owner
);
414 static inline struct hci_dev
*__hci_dev_hold(struct hci_dev
*d
)
416 atomic_inc(&d
->refcnt
);
420 static inline struct hci_dev
*hci_dev_hold(struct hci_dev
*d
)
422 if (try_module_get(d
->owner
))
423 return __hci_dev_hold(d
);
/* Lock helpers for a struct hci_dev; arguments parenthesized so any
 * expression may be passed. */
#define hci_dev_lock(d)		spin_lock(&(d)->lock)
#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
432 struct hci_dev
*hci_dev_get(int index
);
433 struct hci_dev
*hci_get_route(bdaddr_t
*src
, bdaddr_t
*dst
);
435 struct hci_dev
*hci_alloc_dev(void);
436 void hci_free_dev(struct hci_dev
*hdev
);
437 int hci_register_dev(struct hci_dev
*hdev
);
438 int hci_unregister_dev(struct hci_dev
*hdev
);
439 int hci_suspend_dev(struct hci_dev
*hdev
);
440 int hci_resume_dev(struct hci_dev
*hdev
);
441 int hci_dev_open(__u16 dev
);
442 int hci_dev_close(__u16 dev
);
443 int hci_dev_reset(__u16 dev
);
444 int hci_dev_reset_stat(__u16 dev
);
445 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
);
446 int hci_get_dev_list(void __user
*arg
);
447 int hci_get_dev_info(void __user
*arg
);
448 int hci_get_conn_list(void __user
*arg
);
449 int hci_get_conn_info(struct hci_dev
*hdev
, void __user
*arg
);
450 int hci_get_auth_info(struct hci_dev
*hdev
, void __user
*arg
);
451 int hci_inquiry(void __user
*arg
);
453 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
);
454 int hci_blacklist_clear(struct hci_dev
*hdev
);
456 int hci_uuids_clear(struct hci_dev
*hdev
);
458 void hci_del_off_timer(struct hci_dev
*hdev
);
460 void hci_event_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
);
462 int hci_recv_frame(struct sk_buff
*skb
);
463 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
);
464 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
);
466 int hci_register_sysfs(struct hci_dev
*hdev
);
467 void hci_unregister_sysfs(struct hci_dev
*hdev
);
468 void hci_conn_init_sysfs(struct hci_conn
*conn
);
469 void hci_conn_add_sysfs(struct hci_conn
*conn
);
470 void hci_conn_del_sysfs(struct hci_conn
*conn
);
472 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
/* ----- LMP capabilities -----
 * Test bits of the controller's LMP feature mask (dev->features[]) for
 * optional Link Manager Protocol capabilities. */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev)  ((dev)->features[6] & LMP_NO_FLUSH)
483 /* ----- HCI protocols ----- */
491 int (*connect_ind
) (struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, __u8 type
);
492 int (*connect_cfm
) (struct hci_conn
*conn
, __u8 status
);
493 int (*disconn_ind
) (struct hci_conn
*conn
);
494 int (*disconn_cfm
) (struct hci_conn
*conn
, __u8 reason
);
495 int (*recv_acldata
) (struct hci_conn
*conn
, struct sk_buff
*skb
, __u16 flags
);
496 int (*recv_scodata
) (struct hci_conn
*conn
, struct sk_buff
*skb
);
497 int (*security_cfm
) (struct hci_conn
*conn
, __u8 status
, __u8 encrypt
);
/* Offer an incoming connection to both upper protocols (L2CAP, SCO) and
 * OR their responses into a mask that is returned to the caller.
 * NOTE(review): the declaration/initialisation of `mask` and the final
 * `return mask;` (orig lines 501, 503, 513-514) are missing from this
 * extraction - confirm against the pristine header. */
500 static inline int hci_proto_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, __u8 type
)
502 register struct hci_proto
*hp
;
505 hp
= hci_proto
[HCI_PROTO_L2CAP
];
506 if (hp
&& hp
->connect_ind
)
507 mask
|= hp
->connect_ind(hdev
, bdaddr
, type
);
509 hp
= hci_proto
[HCI_PROTO_SCO
];
510 if (hp
&& hp
->connect_ind
)
511 mask
|= hp
->connect_ind(hdev
, bdaddr
, type
);
516 static inline void hci_proto_connect_cfm(struct hci_conn
*conn
, __u8 status
)
518 register struct hci_proto
*hp
;
520 hp
= hci_proto
[HCI_PROTO_L2CAP
];
521 if (hp
&& hp
->connect_cfm
)
522 hp
->connect_cfm(conn
, status
);
524 hp
= hci_proto
[HCI_PROTO_SCO
];
525 if (hp
&& hp
->connect_cfm
)
526 hp
->connect_cfm(conn
, status
);
/* Ask the upper protocols for the reason code to use when disconnecting;
 * the last protocol consulted wins.
 * NOTE(review): the declaration/initial value of `reason` and the final
 * `return reason;` (orig lines 530, 532, 542-543) are missing from this
 * extraction - confirm against the pristine header. */
529 static inline int hci_proto_disconn_ind(struct hci_conn
*conn
)
531 register struct hci_proto
*hp
;
534 hp
= hci_proto
[HCI_PROTO_L2CAP
];
535 if (hp
&& hp
->disconn_ind
)
536 reason
= hp
->disconn_ind(conn
);
538 hp
= hci_proto
[HCI_PROTO_SCO
];
539 if (hp
&& hp
->disconn_ind
)
540 reason
= hp
->disconn_ind(conn
);
545 static inline void hci_proto_disconn_cfm(struct hci_conn
*conn
, __u8 reason
)
547 register struct hci_proto
*hp
;
549 hp
= hci_proto
[HCI_PROTO_L2CAP
];
550 if (hp
&& hp
->disconn_cfm
)
551 hp
->disconn_cfm(conn
, reason
);
553 hp
= hci_proto
[HCI_PROTO_SCO
];
554 if (hp
&& hp
->disconn_cfm
)
555 hp
->disconn_cfm(conn
, reason
);
/* Report an authentication result to the upper protocols via their
 * security_cfm hooks, deriving the current encryption state from the
 * link mode. If an encryption change is still pending, delivery is
 * deferred to the encrypt confirmation instead.
 * NOTE(review): the `__u8 encrypt;` declaration and the early `return;`
 * after the pending test (orig lines 561, 564) are missing from this
 * extraction - confirm against the pristine header. */
558 static inline void hci_proto_auth_cfm(struct hci_conn
*conn
, __u8 status
)
560 register struct hci_proto
*hp
;
563 if (test_bit(HCI_CONN_ENCRYPT_PEND
, &conn
->pend
))
566 encrypt
= (conn
->link_mode
& HCI_LM_ENCRYPT
) ? 0x01 : 0x00;
568 hp
= hci_proto
[HCI_PROTO_L2CAP
];
569 if (hp
&& hp
->security_cfm
)
570 hp
->security_cfm(conn
, status
, encrypt
);
572 hp
= hci_proto
[HCI_PROTO_SCO
];
573 if (hp
&& hp
->security_cfm
)
574 hp
->security_cfm(conn
, status
, encrypt
);
577 static inline void hci_proto_encrypt_cfm(struct hci_conn
*conn
, __u8 status
, __u8 encrypt
)
579 register struct hci_proto
*hp
;
581 hp
= hci_proto
[HCI_PROTO_L2CAP
];
582 if (hp
&& hp
->security_cfm
)
583 hp
->security_cfm(conn
, status
, encrypt
);
585 hp
= hci_proto
[HCI_PROTO_SCO
];
586 if (hp
&& hp
->security_cfm
)
587 hp
->security_cfm(conn
, status
, encrypt
);
/* Register/unregister an upper protocol in the hci_proto[] table. */
int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
593 /* ----- HCI callbacks ----- */
595 struct list_head list
;
599 void (*security_cfm
) (struct hci_conn
*conn
, __u8 status
, __u8 encrypt
);
600 void (*key_change_cfm
) (struct hci_conn
*conn
, __u8 status
);
601 void (*role_switch_cfm
) (struct hci_conn
*conn
, __u8 status
, __u8 role
);
/* Fan an authentication result out to the protocol layer and then, unless
 * an encryption change is still pending, to every registered hci_cb whose
 * security_cfm hook is set (under the cb list read lock).
 * NOTE(review): the declarations of the list cursor and `encrypt`, plus
 * the early `return;` after the pending test (orig lines 605-607, 612),
 * are missing from this extraction - confirm against the pristine header. */
604 static inline void hci_auth_cfm(struct hci_conn
*conn
, __u8 status
)
609 hci_proto_auth_cfm(conn
, status
);
611 if (test_bit(HCI_CONN_ENCRYPT_PEND
, &conn
->pend
))
614 encrypt
= (conn
->link_mode
& HCI_LM_ENCRYPT
) ? 0x01 : 0x00;
616 read_lock_bh(&hci_cb_list_lock
);
617 list_for_each(p
, &hci_cb_list
) {
618 struct hci_cb
*cb
= list_entry(p
, struct hci_cb
, list
);
619 if (cb
->security_cfm
)
620 cb
->security_cfm(conn
, status
, encrypt
);
622 read_unlock_bh(&hci_cb_list_lock
);
625 static inline void hci_encrypt_cfm(struct hci_conn
*conn
, __u8 status
, __u8 encrypt
)
629 if (conn
->sec_level
== BT_SECURITY_SDP
)
630 conn
->sec_level
= BT_SECURITY_LOW
;
632 hci_proto_encrypt_cfm(conn
, status
, encrypt
);
634 read_lock_bh(&hci_cb_list_lock
);
635 list_for_each(p
, &hci_cb_list
) {
636 struct hci_cb
*cb
= list_entry(p
, struct hci_cb
, list
);
637 if (cb
->security_cfm
)
638 cb
->security_cfm(conn
, status
, encrypt
);
640 read_unlock_bh(&hci_cb_list_lock
);
643 static inline void hci_key_change_cfm(struct hci_conn
*conn
, __u8 status
)
647 read_lock_bh(&hci_cb_list_lock
);
648 list_for_each(p
, &hci_cb_list
) {
649 struct hci_cb
*cb
= list_entry(p
, struct hci_cb
, list
);
650 if (cb
->key_change_cfm
)
651 cb
->key_change_cfm(conn
, status
);
653 read_unlock_bh(&hci_cb_list_lock
);
656 static inline void hci_role_switch_cfm(struct hci_conn
*conn
, __u8 status
, __u8 role
)
660 read_lock_bh(&hci_cb_list_lock
);
661 list_for_each(p
, &hci_cb_list
) {
662 struct hci_cb
*cb
= list_entry(p
, struct hci_cb
, list
);
663 if (cb
->role_switch_cfm
)
664 cb
->role_switch_cfm(conn
, status
, role
);
666 read_unlock_bh(&hci_cb_list_lock
);
669 int hci_register_cb(struct hci_cb
*hcb
);
670 int hci_unregister_cb(struct hci_cb
*hcb
);
672 int hci_register_notifier(struct notifier_block
*nb
);
673 int hci_unregister_notifier(struct notifier_block
*nb
);
675 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
);
676 void hci_send_acl(struct hci_conn
*conn
, struct sk_buff
*skb
, __u16 flags
);
677 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
);
679 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
);
681 void hci_si_event(struct hci_dev
*hdev
, int type
, int dlen
, void *data
);
/* ----- HCI Sockets ----- */
/* Deliver skb to all listening HCI sockets except skip_sk (which may be
 * NULL to broadcast to everyone). */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
							struct sock *skip_sk);
687 /* Management interface */
688 int mgmt_control(struct sock
*sk
, struct msghdr
*msg
, size_t len
);
689 int mgmt_index_added(u16 index
);
690 int mgmt_index_removed(u16 index
);
691 int mgmt_powered(u16 index
, u8 powered
);
692 int mgmt_discoverable(u16 index
, u8 discoverable
);
693 int mgmt_connectable(u16 index
, u8 connectable
);
/* HCI info for socket: cast a struct sock to its HCI-specific wrapper.
 * Argument parenthesized for macro hygiene. */
#define hci_pi(sk) ((struct hci_pinfo *) (sk))
700 struct hci_dev
*hdev
;
701 struct hci_filter filter
;
703 unsigned short channel
;
/* HCI security filter: highest opcode group (OGF) covered by the
 * per-socket command filter table. */
#define HCI_SFLT_MAX_OGF 5
709 struct hci_sec_filter
{
712 __u32 ocf_mask
[HCI_SFLT_MAX_OGF
+ 1][4];
/* ----- HCI requests ----- */
/* States of a synchronous HCI request. */
#define HCI_REQ_DONE		0
#define HCI_REQ_PEND		1
#define HCI_REQ_CANCELED	2

/* Serialise request submission on a device; argument parenthesized. */
#define hci_req_lock(d)		mutex_lock(&(d)->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&(d)->req_lock)
723 void hci_req_complete(struct hci_dev
*hdev
, __u16 cmd
, int result
);
725 #endif /* __HCI_CORE_H */