Bluetooth: Track discovery type
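The diff below is the cumulative change to net/bluetooth/hci_core.c between the two blobs named in the header; tracking the discovery type (the new hdev->discovery.type field, cleared whenever the state machine returns to DISCOVERY_STOPPED) is one part of it. The same span also converts the rx/tx/cmd tasklets into work items, replaces the single-linked inquiry cache with the list-based struct discovery_state, splits SMP long term keys out of the link-key list, introduces per-hci_chan ACL scheduling with priorities and block-based flow control, and adds LE scan work plus the enable_hs module parameter.

A minimal sketch of how the new discovery helpers are meant to be driven. The caller and the type value are hypothetical; hci_discovery_active(), hci_discovery_set_state(), hci_do_inquiry() and the DISCOVERY_* states are the ones added in this diff:

	/* Illustrative only: hypothetical caller exercising the new helpers. */
	static int example_start_inquiry(struct hci_dev *hdev)
	{
		int err;

		hci_dev_lock(hdev);

		if (hci_discovery_active(hdev)) {
			/* Already in INQUIRY, LE_SCAN or RESOLVING */
			hci_dev_unlock(hdev);
			return -EINPROGRESS;
		}

		hdev->discovery.type = 1;	/* hypothetical type value */
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);
		hci_dev_unlock(hdev);

		/* hci_do_inquiry() flushes the cache and sends HCI_OP_INQUIRY;
		 * length is in units of 1.28 seconds. */
		err = hci_do_inquiry(hdev, 0x08);
		if (err < 0) {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		return err;
	}

The inquiry-related event handlers (in hci_event.c, not shown here) are then expected to advance the state to DISCOVERY_INQUIRY, which triggers mgmt_discovering(), and name resolution afterwards can walk the new unknown/resolve lists via hci_inquiry_cache_lookup_unknown() and hci_inquiry_cache_lookup_resolve().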
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b84458dcc2261259d83e1bad2aecef95ed622113..29a9b01c3b9ba505f2d38340968275812198a322 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,6 +1,7 @@
 /*
    BlueZ - Bluetooth protocol stack for Linux
    Copyright (C) 2000-2001 Qualcomm Incorporated
+   Copyright (C) 2011 ProFUSION Embedded Systems
 
    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
 
 
 #define AUTO_OFF_TIMEOUT 2000
 
-static void hci_cmd_task(unsigned long arg);
-static void hci_rx_task(unsigned long arg);
-static void hci_tx_task(unsigned long arg);
+bool enable_hs;
 
-static DEFINE_RWLOCK(hci_task_lock);
+static void hci_rx_work(struct work_struct *work);
+static void hci_cmd_work(struct work_struct *work);
+static void hci_tx_work(struct work_struct *work);
 
 /* HCI device list */
 LIST_HEAD(hci_dev_list);
@@ -68,10 +69,6 @@ DEFINE_RWLOCK(hci_dev_list_lock);
 LIST_HEAD(hci_cb_list);
 DEFINE_RWLOCK(hci_cb_list_lock);
 
-/* HCI protocols */
-#define HCI_MAX_PROTO  2
-struct hci_proto *hci_proto[HCI_MAX_PROTO];
-
 /* HCI notifiers list */
 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
 
@@ -190,33 +187,20 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 }
 
-static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+static void bredr_init(struct hci_dev *hdev)
 {
        struct hci_cp_delete_stored_link_key cp;
-       struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;
 
-       BT_DBG("%s %ld", hdev->name, opt);
-
-       /* Driver initialization */
-
-       /* Special commands */
-       while ((skb = skb_dequeue(&hdev->driver_init))) {
-               bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
-               skb->dev = (void *) hdev;
-
-               skb_queue_tail(&hdev->cmd_q, skb);
-               tasklet_schedule(&hdev->cmd_task);
-       }
-       skb_queue_purge(&hdev->driver_init);
+       hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
 
        /* Mandatory initialization */
 
        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
-                       set_bit(HCI_RESET, &hdev->flags);
-                       hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+               set_bit(HCI_RESET, &hdev->flags);
+               hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }
 
        /* Read Local Supported Features */
@@ -228,18 +212,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
 
-#if 0
-       /* Host buffer size */
-       {
-               struct hci_cp_host_buffer_size cp;
-               cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
-               cp.sco_mtu = HCI_MAX_SCO_SIZE;
-               cp.acl_max_pkt = cpu_to_le16(0xffff);
-               cp.sco_max_pkt = cpu_to_le16(0xffff);
-               hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
-       }
-#endif
-
        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
 
@@ -267,6 +239,51 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
 }
 
+static void amp_init(struct hci_dev *hdev)
+{
+       hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+
+       /* Reset */
+       hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+
+       /* Read Local Version */
+       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+}
+
+static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+{
+       struct sk_buff *skb;
+
+       BT_DBG("%s %ld", hdev->name, opt);
+
+       /* Driver initialization */
+
+       /* Special commands */
+       while ((skb = skb_dequeue(&hdev->driver_init))) {
+               bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+               skb->dev = (void *) hdev;
+
+               skb_queue_tail(&hdev->cmd_q, skb);
+               queue_work(hdev->workqueue, &hdev->cmd_work);
+       }
+       skb_queue_purge(&hdev->driver_init);
+
+       switch (hdev->dev_type) {
+       case HCI_BREDR:
+               bredr_init(hdev);
+               break;
+
+       case HCI_AMP:
+               amp_init(hdev);
+               break;
+
+       default:
+               BT_ERR("Unknown device type %d", hdev->dev_type);
+               break;
+       }
+
+}
+
 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
 {
        BT_DBG("%s", hdev->name);
@@ -319,8 +336,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
  * Device is held on return. */
 struct hci_dev *hci_dev_get(int index)
 {
-       struct hci_dev *hdev = NULL;
-       struct list_head *p;
+       struct hci_dev *hdev = NULL, *d;
 
        BT_DBG("%d", index);
 
@@ -328,8 +344,7 @@ struct hci_dev *hci_dev_get(int index)
                return NULL;
 
        read_lock(&hci_dev_list_lock);
-       list_for_each(p, &hci_dev_list) {
-               struct hci_dev *d = list_entry(p, struct hci_dev, list);
+       list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
@@ -340,72 +355,207 @@ struct hci_dev *hci_dev_get(int index)
 }
 
 /* ---- Inquiry support ---- */
-static void inquiry_cache_flush(struct hci_dev *hdev)
+
+bool hci_discovery_active(struct hci_dev *hdev)
 {
-       struct inquiry_cache *cache = &hdev->inq_cache;
-       struct inquiry_entry *next  = cache->list, *e;
+       struct discovery_state *discov = &hdev->discovery;
 
-       BT_DBG("cache %p", cache);
+       switch (discov->state) {
+       case DISCOVERY_INQUIRY:
+       case DISCOVERY_LE_SCAN:
+       case DISCOVERY_RESOLVING:
+               return true;
 
-       cache->list = NULL;
-       while ((e = next)) {
-               next = e->next;
-               kfree(e);
+       default:
+               return false;
        }
 }
 
+void hci_discovery_set_state(struct hci_dev *hdev, int state)
+{
+       BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
+
+       if (hdev->discovery.state == state)
+               return;
+
+       switch (state) {
+       case DISCOVERY_STOPPED:
+               hdev->discovery.type = 0;
+
+               if (hdev->discovery.state != DISCOVERY_STARTING)
+                       mgmt_discovering(hdev, 0);
+               break;
+       case DISCOVERY_STARTING:
+               break;
+       case DISCOVERY_INQUIRY:
+       case DISCOVERY_LE_SCAN:
+               mgmt_discovering(hdev, 1);
+               break;
+       case DISCOVERY_RESOLVING:
+               break;
+       case DISCOVERY_STOPPING:
+               break;
+       }
+
+       hdev->discovery.state = state;
+}
+
+static void inquiry_cache_flush(struct hci_dev *hdev)
+{
+       struct discovery_state *cache = &hdev->discovery;
+       struct inquiry_entry *p, *n;
+
+       list_for_each_entry_safe(p, n, &cache->all, all) {
+               list_del(&p->all);
+               kfree(p);
+       }
+
+       INIT_LIST_HEAD(&cache->unknown);
+       INIT_LIST_HEAD(&cache->resolve);
+       cache->state = DISCOVERY_STOPPED;
+}
+
 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-       struct inquiry_cache *cache = &hdev->inq_cache;
+       struct discovery_state *cache = &hdev->discovery;
+       struct inquiry_entry *e;
+
+       BT_DBG("cache %p, %s", cache, batostr(bdaddr));
+
+       list_for_each_entry(e, &cache->all, all) {
+               if (!bacmp(&e->data.bdaddr, bdaddr))
+                       return e;
+       }
+
+       return NULL;
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
+                                                       bdaddr_t *bdaddr)
+{
+       struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;
 
        BT_DBG("cache %p, %s", cache, batostr(bdaddr));
 
-       for (e = cache->list; e; e = e->next)
+       list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
+                       return e;
+       }
+
+       return NULL;
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
+                                                       bdaddr_t *bdaddr,
+                                                       int state)
+{
+       struct discovery_state *cache = &hdev->discovery;
+       struct inquiry_entry *e;
+
+       BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
+
+       list_for_each_entry(e, &cache->resolve, list) {
+               if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
+                       return e;
+               if (!bacmp(&e->data.bdaddr, bdaddr))
+                       return e;
+       }
+
+       return NULL;
+}
+
+void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
+                                               struct inquiry_entry *ie)
+{
+       struct discovery_state *cache = &hdev->discovery;
+       struct list_head *pos = &cache->resolve;
+       struct inquiry_entry *p;
+
+       list_del(&ie->list);
+
+       list_for_each_entry(p, &cache->resolve, list) {
+               if (p->name_state != NAME_PENDING &&
+                               abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
-       return e;
+               pos = &p->list;
+       }
+
+       list_add(&ie->list, pos);
 }
 
-void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
+bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+                                                       bool name_known)
 {
-       struct inquiry_cache *cache = &hdev->inq_cache;
+       struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
 
        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
 
        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
-       if (!ie) {
-               /* Entry not in the cache. Add new one. */
-               ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
-               if (!ie)
-                       return;
+       if (ie) {
+               if (ie->name_state == NAME_NEEDED &&
+                                               data->rssi != ie->data.rssi) {
+                       ie->data.rssi = data->rssi;
+                       hci_inquiry_cache_update_resolve(hdev, ie);
+               }
+
+               goto update;
+       }
+
+       /* Entry not in the cache. Add new one. */
+       ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
+       if (!ie)
+               return false;
+
+       list_add(&ie->all, &cache->all);
+
+       if (name_known) {
+               ie->name_state = NAME_KNOWN;
+       } else {
+               ie->name_state = NAME_NOT_KNOWN;
+               list_add(&ie->list, &cache->unknown);
+       }
 
-               ie->next = cache->list;
-               cache->list = ie;
+update:
+       if (name_known && ie->name_state != NAME_KNOWN &&
+                                       ie->name_state != NAME_PENDING) {
+               ie->name_state = NAME_KNOWN;
+               list_del(&ie->list);
        }
 
        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
+
+       if (ie->name_state == NAME_NOT_KNOWN)
+               return false;
+
+       return true;
 }
 
 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
 {
-       struct inquiry_cache *cache = &hdev->inq_cache;
+       struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;
 
-       for (e = cache->list; e && copied < num; e = e->next, copied++) {
+       list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
+
+               if (copied >= num)
+                       break;
+
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
+
                info++;
+               copied++;
        }
 
        BT_DBG("cache %p, copied %d", cache, copied);
@@ -445,14 +595,14 @@ int hci_inquiry(void __user *arg)
        if (!hdev)
                return -ENODEV;
 
-       hci_dev_lock_bh(hdev);
+       hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
-       hci_dev_unlock_bh(hdev);
+       hci_dev_unlock(hdev);
 
        timeo = ir.length * msecs_to_jiffies(2000);
 
@@ -474,9 +624,9 @@ int hci_inquiry(void __user *arg)
                goto done;
        }
 
-       hci_dev_lock_bh(hdev);
+       hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
-       hci_dev_unlock_bh(hdev);
+       hci_dev_unlock(hdev);
 
        BT_DBG("num_rsp %d", ir.num_rsp);
 
@@ -523,8 +673,9 @@ int hci_dev_open(__u16 dev)
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);
 
-       /* Treat all non BR/EDR controllers as raw devices for now */
-       if (hdev->dev_type != HCI_BREDR)
+       /* Treat all non BR/EDR controllers as raw devices if
+          enable_hs is not set */
+       if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);
 
        if (hdev->open(hdev)) {
@@ -551,13 +702,16 @@ int hci_dev_open(__u16 dev)
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
-               if (!test_bit(HCI_SETUP, &hdev->flags))
-                       mgmt_powered(hdev->id, 1);
+               if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+                       hci_dev_lock(hdev);
+                       mgmt_powered(hdev, 1);
+                       hci_dev_unlock(hdev);
+               }
        } else {
                /* Init failed, cleanup */
-               tasklet_kill(&hdev->rx_task);
-               tasklet_kill(&hdev->tx_task);
-               tasklet_kill(&hdev->cmd_task);
+               flush_work(&hdev->tx_work);
+               flush_work(&hdev->cmd_work);
+               flush_work(&hdev->rx_work);
 
                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);
@@ -584,6 +738,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 {
        BT_DBG("%s %p", hdev->name, hdev);
 
+       cancel_work_sync(&hdev->le_scan);
+
        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);
 
@@ -593,14 +749,27 @@ static int hci_dev_do_close(struct hci_dev *hdev)
                return 0;
        }
 
-       /* Kill RX and TX tasks */
-       tasklet_kill(&hdev->rx_task);
-       tasklet_kill(&hdev->tx_task);
+       /* Flush RX and TX works */
+       flush_work(&hdev->tx_work);
+       flush_work(&hdev->rx_work);
+
+       if (hdev->discov_timeout > 0) {
+               cancel_delayed_work(&hdev->discov_off);
+               hdev->discov_timeout = 0;
+       }
+
+       if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+               cancel_delayed_work(&hdev->power_off);
 
-       hci_dev_lock_bh(hdev);
+       if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+               cancel_delayed_work(&hdev->service_cache);
+
+       cancel_delayed_work_sync(&hdev->le_scan_disable);
+
+       hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
-       hci_dev_unlock_bh(hdev);
+       hci_dev_unlock(hdev);
 
        hci_notify(hdev, HCI_DEV_DOWN);
 
@@ -610,15 +779,16 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
-       if (!test_bit(HCI_RAW, &hdev->flags)) {
+       if (!test_bit(HCI_RAW, &hdev->flags) &&
+                               test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }
 
-       /* Kill cmd task */
-       tasklet_kill(&hdev->cmd_task);
+       /* flush cmd  work */
+       flush_work(&hdev->cmd_work);
 
        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
@@ -636,7 +806,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
         * and no tasks are scheduled. */
        hdev->close(hdev);
 
-       mgmt_powered(hdev->id, 0);
+       hci_dev_lock(hdev);
+       mgmt_powered(hdev, 0);
+       hci_dev_unlock(hdev);
 
        /* Clear flags */
        hdev->flags = 0;
@@ -670,7 +842,6 @@ int hci_dev_reset(__u16 dev)
                return -ENODEV;
 
        hci_req_lock(hdev);
-       tasklet_disable(&hdev->tx_task);
 
        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;
@@ -679,10 +850,10 @@ int hci_dev_reset(__u16 dev)
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
 
-       hci_dev_lock_bh(hdev);
+       hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
-       hci_dev_unlock_bh(hdev);
+       hci_dev_unlock(hdev);
 
        if (hdev->flush)
                hdev->flush(hdev);
@@ -695,7 +866,6 @@ int hci_dev_reset(__u16 dev)
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
 done:
-       tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
@@ -794,9 +964,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 
 int hci_get_dev_list(void __user *arg)
 {
+       struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
-       struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;
 
@@ -814,16 +984,13 @@ int hci_get_dev_list(void __user *arg)
 
        dr = dl->dev_req;
 
-       read_lock_bh(&hci_dev_list_lock);
-       list_for_each(p, &hci_dev_list) {
-               struct hci_dev *hdev;
-
-               hdev = list_entry(p, struct hci_dev, list);
-
-               hci_del_off_timer(hdev);
+       read_lock(&hci_dev_list_lock);
+       list_for_each_entry(hdev, &hci_dev_list, list) {
+               if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+                       cancel_delayed_work(&hdev->power_off);
 
-               if (!test_bit(HCI_MGMT, &hdev->flags))
-                       set_bit(HCI_PAIRABLE, &hdev->flags);
+               if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+                       set_bit(HCI_PAIRABLE, &hdev->dev_flags);
 
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
@@ -831,7 +998,7 @@ int hci_get_dev_list(void __user *arg)
                if (++n >= dev_num)
                        break;
        }
-       read_unlock_bh(&hci_dev_list_lock);
+       read_unlock(&hci_dev_list_lock);
 
        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);
@@ -855,10 +1022,11 @@ int hci_get_dev_info(void __user *arg)
        if (!hdev)
                return -ENODEV;
 
-       hci_del_off_timer(hdev);
+       if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+               cancel_delayed_work_sync(&hdev->power_off);
 
-       if (!test_bit(HCI_MGMT, &hdev->flags))
-               set_bit(HCI_PAIRABLE, &hdev->flags);
+       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+               set_bit(HCI_PAIRABLE, &hdev->dev_flags);
 
        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
@@ -912,6 +1080,7 @@ struct hci_dev *hci_alloc_dev(void)
        if (!hdev)
                return NULL;
 
+       hci_init_sysfs(hdev);
        skb_queue_head_init(&hdev->driver_init);
 
        return hdev;
@@ -937,40 +1106,42 @@ static void hci_power_on(struct work_struct *work)
        if (hci_dev_open(hdev->id) < 0)
                return;
 
-       if (test_bit(HCI_AUTO_OFF, &hdev->flags))
-               mod_timer(&hdev->off_timer,
-                               jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+       if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+               schedule_delayed_work(&hdev->power_off,
+                                       msecs_to_jiffies(AUTO_OFF_TIMEOUT));
 
-       if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
-               mgmt_index_added(hdev->id);
+       if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
+               mgmt_index_added(hdev);
 }
 
 static void hci_power_off(struct work_struct *work)
 {
-       struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                                       power_off.work);
 
        BT_DBG("%s", hdev->name);
 
+       clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+
        hci_dev_close(hdev->id);
 }
 
-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
 {
-       struct hci_dev *hdev = (struct hci_dev *) data;
+       struct hci_dev *hdev;
+       u8 scan = SCAN_PAGE;
+
+       hdev = container_of(work, struct hci_dev, discov_off.work);
 
        BT_DBG("%s", hdev->name);
 
-       clear_bit(HCI_AUTO_OFF, &hdev->flags);
+       hci_dev_lock(hdev);
 
-       queue_work(hdev->workqueue, &hdev->power_off);
-}
+       hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 
-void hci_del_off_timer(struct hci_dev *hdev)
-{
-       BT_DBG("%s", hdev->name);
+       hdev->discov_timeout = 0;
 
-       clear_bit(HCI_AUTO_OFF, &hdev->flags);
-       del_timer(&hdev->off_timer);
+       hci_dev_unlock(hdev);
 }
 
 int hci_uuids_clear(struct hci_dev *hdev)
@@ -1005,18 +1176,25 @@ int hci_link_keys_clear(struct hci_dev *hdev)
        return 0;
 }
 
-struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int hci_smp_ltks_clear(struct hci_dev *hdev)
 {
-       struct list_head *p;
+       struct smp_ltk *k, *tmp;
 
-       list_for_each(p, &hdev->link_keys) {
-               struct link_key *k;
+       list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+               list_del(&k->list);
+               kfree(k);
+       }
 
-               k = list_entry(p, struct link_key, list);
+       return 0;
+}
+
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct link_key *k;
 
+       list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;
-       }
 
        return NULL;
 }
@@ -1057,41 +1235,35 @@ static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
        return 0;
 }
 
-struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
 {
-       struct link_key *k;
+       struct smp_ltk *k;
 
-       list_for_each_entry(k, &hdev->link_keys, list) {
-               struct key_master_id *id;
-
-               if (k->type != HCI_LK_SMP_LTK)
-                       continue;
-
-               if (k->dlen != sizeof(*id))
+       list_for_each_entry(k, &hdev->long_term_keys, list) {
+               if (k->ediv != ediv ||
+                               memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;
 
-               id = (void *) &k->data;
-               if (id->ediv == ediv &&
-                               (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
-                       return k;
+               return k;
        }
 
        return NULL;
 }
 EXPORT_SYMBOL(hci_find_ltk);
 
-struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
-                                       bdaddr_t *bdaddr, u8 type)
+struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                               u8 addr_type)
 {
-       struct link_key *k;
+       struct smp_ltk *k;
 
-       list_for_each_entry(k, &hdev->link_keys, list)
-               if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
+       list_for_each_entry(k, &hdev->long_term_keys, list)
+               if (addr_type == k->bdaddr_type &&
+                                       bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;
 
        return NULL;
 }
-EXPORT_SYMBOL(hci_find_link_key_type);
+EXPORT_SYMBOL(hci_find_ltk_by_addr);
 
 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                                bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
@@ -1138,7 +1310,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 
        persistent = hci_persistent_key(hdev, conn, type, old_key_type);
 
-       mgmt_new_key(hdev->id, key, persistent);
+       mgmt_new_link_key(hdev, key, persistent);
 
        if (!persistent) {
                list_del(&key->list);
@@ -1148,40 +1320,39 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
        return 0;
 }
 
-int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
-                       u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
+int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
+                               int new_key, u8 authenticated, u8 tk[16],
+                               u8 enc_size, u16 ediv, u8 rand[8])
 {
-       struct link_key *key, *old_key;
-       struct key_master_id *id;
-       u8 old_key_type;
+       struct smp_ltk *key, *old_key;
 
-       BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
+       if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
+               return 0;
 
-       old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
-       if (old_key) {
+       old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
+       if (old_key)
                key = old_key;
-               old_key_type = old_key->type;
-       } else {
-               key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
+       else {
+               key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
-               list_add(&key->list, &hdev->link_keys);
-               old_key_type = 0xff;
+               list_add(&key->list, &hdev->long_term_keys);
        }
 
-       key->dlen = sizeof(*id);
-
        bacpy(&key->bdaddr, bdaddr);
-       memcpy(key->val, ltk, sizeof(key->val));
-       key->type = HCI_LK_SMP_LTK;
-       key->pin_len = key_size;
+       key->bdaddr_type = addr_type;
+       memcpy(key->val, tk, sizeof(key->val));
+       key->authenticated = authenticated;
+       key->ediv = ediv;
+       key->enc_size = enc_size;
+       key->type = type;
+       memcpy(key->rand, rand, sizeof(key->rand));
 
-       id = (void *) &key->data;
-       id->ediv = ediv;
-       memcpy(id->rand, rand, sizeof(id->rand));
+       if (!new_key)
+               return 0;
 
-       if (new_key)
-               mgmt_new_key(hdev->id, key, old_key_type);
+       if (type & HCI_SMP_LTK)
+               mgmt_new_ltk(hdev, key, 1);
 
        return 0;
 }
@@ -1202,6 +1373,23 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
        return 0;
 }
 
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct smp_ltk *k, *tmp;
+
+       list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+               if (bacmp(bdaddr, &k->bdaddr))
+                       continue;
+
+               BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+
+               list_del(&k->list);
+               kfree(k);
+       }
+
+       return 0;
+}
+
 /* HCI command timer function */
 static void hci_cmd_timer(unsigned long arg)
 {
@@ -1209,7 +1397,7 @@ static void hci_cmd_timer(unsigned long arg)
 
        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
-       tasklet_schedule(&hdev->cmd_task);
+       queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
@@ -1279,16 +1467,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
                                                bdaddr_t *bdaddr)
 {
-       struct list_head *p;
-
-       list_for_each(p, &hdev->blacklist) {
-               struct bdaddr_list *b;
-
-               b = list_entry(p, struct bdaddr_list, list);
+       struct bdaddr_list *b;
 
+       list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;
-       }
 
        return NULL;
 }
@@ -1309,7 +1492,7 @@ int hci_blacklist_clear(struct hci_dev *hdev)
        return 0;
 }
 
-int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
 {
        struct bdaddr_list *entry;
 
@@ -1327,31 +1510,30 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
 
        list_add(&entry->list, &hdev->blacklist);
 
-       return mgmt_device_blocked(hdev->id, bdaddr);
+       return mgmt_device_blocked(hdev, bdaddr, type);
 }
 
-int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
 {
        struct bdaddr_list *entry;
 
-       if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+       if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);
-       }
 
        entry = hci_blacklist_lookup(hdev, bdaddr);
-       if (!entry) {
+       if (!entry)
                return -ENOENT;
-       }
 
        list_del(&entry->list);
        kfree(entry);
 
-       return mgmt_device_unblocked(hdev->id, bdaddr);
+       return mgmt_device_unblocked(hdev, bdaddr, type);
 }
 
-static void hci_clear_adv_cache(unsigned long arg)
+static void hci_clear_adv_cache(struct work_struct *work)
 {
-       struct hci_dev *hdev = (void *) arg;
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                                       adv_work.work);
 
        hci_dev_lock(hdev);
 
@@ -1406,7 +1588,7 @@ int hci_add_adv_entry(struct hci_dev *hdev,
        if (hci_find_adv_entry(hdev, &ev->bdaddr))
                return 0;
 
-       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
@@ -1421,19 +1603,124 @@ int hci_add_adv_entry(struct hci_dev *hdev,
        return 0;
 }
 
+static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
+{
+       struct le_scan_params *param =  (struct le_scan_params *) opt;
+       struct hci_cp_le_set_scan_param cp;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.type = param->type;
+       cp.interval = cpu_to_le16(param->interval);
+       cp.window = cpu_to_le16(param->window);
+
+       hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
+}
+
+static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
+{
+       struct hci_cp_le_set_scan_enable cp;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.enable = 1;
+
+       hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
+                                               u16 window, int timeout)
+{
+       long timeo = msecs_to_jiffies(3000);
+       struct le_scan_params param;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+               return -EINPROGRESS;
+
+       param.type = type;
+       param.interval = interval;
+       param.window = window;
+
+       hci_req_lock(hdev);
+
+       err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
+                                                                       timeo);
+       if (!err)
+               err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
+
+       hci_req_unlock(hdev);
+
+       if (err < 0)
+               return err;
+
+       schedule_delayed_work(&hdev->le_scan_disable,
+                                               msecs_to_jiffies(timeout));
+
+       return 0;
+}
+
+static void le_scan_disable_work(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                               le_scan_disable.work);
+       struct hci_cp_le_set_scan_enable cp;
+
+       BT_DBG("%s", hdev->name);
+
+       memset(&cp, 0, sizeof(cp));
+
+       hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+static void le_scan_work(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
+       struct le_scan_params *param = &hdev->le_scan_params;
+
+       BT_DBG("%s", hdev->name);
+
+       hci_do_le_scan(hdev, param->type, param->interval,
+                                       param->window, param->timeout);
+}
+
+int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
+                                                               int timeout)
+{
+       struct le_scan_params *param = &hdev->le_scan_params;
+
+       BT_DBG("%s", hdev->name);
+
+       if (work_busy(&hdev->le_scan))
+               return -EINPROGRESS;
+
+       param->type = type;
+       param->interval = interval;
+       param->window = window;
+       param->timeout = timeout;
+
+       queue_work(system_long_wq, &hdev->le_scan);
+
+       return 0;
+}
+
 /* Register HCI device */
 int hci_register_dev(struct hci_dev *hdev)
 {
        struct list_head *head = &hci_dev_list, *p;
-       int i, id = 0;
+       int i, id, error;
 
-       BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
-                                               hdev->bus, hdev->owner);
+       BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
-       if (!hdev->open || !hdev->close || !hdev->destruct)
+       if (!hdev->open || !hdev->close)
                return -EINVAL;
 
-       write_lock_bh(&hci_dev_list_lock);
+       /* Do not allow HCI_AMP devices to register at index 0,
+        * so the index can be used as the AMP controller ID.
+        */
+       id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+
+       write_lock(&hci_dev_list_lock);
 
        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
@@ -1444,12 +1731,12 @@ int hci_register_dev(struct hci_dev *hdev)
 
        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
-       list_add(&hdev->list, head);
+       list_add_tail(&hdev->list, head);
 
-       atomic_set(&hdev->refcnt, 1);
-       spin_lock_init(&hdev->lock);
+       mutex_init(&hdev->lock);
 
        hdev->flags = 0;
+       hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
@@ -1459,9 +1746,10 @@ int hci_register_dev(struct hci_dev *hdev)
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;
 
-       tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
-       tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
-       tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
+       INIT_WORK(&hdev->rx_work, hci_rx_work);
+       INIT_WORK(&hdev->cmd_work, hci_cmd_work);
+       INIT_WORK(&hdev->tx_work, hci_tx_work);
+
 
        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
@@ -1475,37 +1763,49 @@ int hci_register_dev(struct hci_dev *hdev)
        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);
 
-       inquiry_cache_init(hdev);
+       discovery_init(hdev);
 
        hci_conn_hash_init(hdev);
 
+       INIT_LIST_HEAD(&hdev->mgmt_pending);
+
        INIT_LIST_HEAD(&hdev->blacklist);
 
        INIT_LIST_HEAD(&hdev->uuids);
 
        INIT_LIST_HEAD(&hdev->link_keys);
+       INIT_LIST_HEAD(&hdev->long_term_keys);
 
        INIT_LIST_HEAD(&hdev->remote_oob_data);
 
        INIT_LIST_HEAD(&hdev->adv_entries);
-       setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
-                                               (unsigned long) hdev);
 
+       INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
        INIT_WORK(&hdev->power_on, hci_power_on);
-       INIT_WORK(&hdev->power_off, hci_power_off);
-       setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+       INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+
+       INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
 
        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
        atomic_set(&hdev->promisc, 0);
 
-       write_unlock_bh(&hci_dev_list_lock);
+       INIT_WORK(&hdev->le_scan, le_scan_work);
 
-       hdev->workqueue = create_singlethread_workqueue(hdev->name);
-       if (!hdev->workqueue)
-               goto nomem;
+       INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
 
-       hci_register_sysfs(hdev);
+       write_unlock(&hci_dev_list_lock);
+
+       hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
+                                                       WQ_MEM_RECLAIM, 1);
+       if (!hdev->workqueue) {
+               error = -ENOMEM;
+               goto err;
+       }
+
+       error = hci_add_sysfs(hdev);
+       if (error < 0)
+               goto err_wqueue;
 
        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1516,33 +1816,36 @@ int hci_register_dev(struct hci_dev *hdev)
                }
        }
 
-       set_bit(HCI_AUTO_OFF, &hdev->flags);
-       set_bit(HCI_SETUP, &hdev->flags);
-       queue_work(hdev->workqueue, &hdev->power_on);
+       set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+       set_bit(HCI_SETUP, &hdev->dev_flags);
+       schedule_work(&hdev->power_on);
 
        hci_notify(hdev, HCI_DEV_REG);
+       hci_dev_hold(hdev);
 
        return id;
 
-nomem:
-       write_lock_bh(&hci_dev_list_lock);
+err_wqueue:
+       destroy_workqueue(hdev->workqueue);
+err:
+       write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
-       write_unlock_bh(&hci_dev_list_lock);
+       write_unlock(&hci_dev_list_lock);
 
-       return -ENOMEM;
+       return error;
 }
 EXPORT_SYMBOL(hci_register_dev);
 
 /* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
 {
        int i;
 
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
-       write_lock_bh(&hci_dev_list_lock);
+       write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
-       write_unlock_bh(&hci_dev_list_lock);
+       write_unlock(&hci_dev_list_lock);
 
        hci_dev_do_close(hdev);
 
@@ -1550,8 +1853,15 @@ int hci_unregister_dev(struct hci_dev *hdev)
                kfree_skb(hdev->reassembly[i]);
 
        if (!test_bit(HCI_INIT, &hdev->flags) &&
-                                       !test_bit(HCI_SETUP, &hdev->flags))
-               mgmt_index_removed(hdev->id);
+                               !test_bit(HCI_SETUP, &hdev->dev_flags)) {
+               hci_dev_lock(hdev);
+               mgmt_index_removed(hdev);
+               hci_dev_unlock(hdev);
+       }
+
+       /* mgmt_index_removed should take care of emptying the
+        * pending list */
+       BUG_ON(!list_empty(&hdev->mgmt_pending));
 
        hci_notify(hdev, HCI_DEV_UNREG);
 
@@ -1560,24 +1870,22 @@ int hci_unregister_dev(struct hci_dev *hdev)
                rfkill_destroy(hdev->rfkill);
        }
 
-       hci_unregister_sysfs(hdev);
+       hci_del_sysfs(hdev);
 
-       hci_del_off_timer(hdev);
-       del_timer(&hdev->adv_timer);
+       cancel_delayed_work_sync(&hdev->adv_work);
 
        destroy_workqueue(hdev->workqueue);
 
-       hci_dev_lock_bh(hdev);
+       hci_dev_lock(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
+       hci_smp_ltks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_adv_entries_clear(hdev);
-       hci_dev_unlock_bh(hdev);
-
-       __hci_dev_put(hdev);
+       hci_dev_unlock(hdev);
 
-       return 0;
+       hci_dev_put(hdev);
 }
 EXPORT_SYMBOL(hci_unregister_dev);
 
@@ -1613,9 +1921,8 @@ int hci_recv_frame(struct sk_buff *skb)
        /* Time stamp */
        __net_timestamp(skb);
 
-       /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
-       tasklet_schedule(&hdev->rx_task);
+       queue_work(hdev->workqueue, &hdev->rx_work);
 
        return 0;
 }
@@ -1787,59 +2094,13 @@ EXPORT_SYMBOL(hci_recv_stream_fragment);
 
 /* ---- Interface to upper protocols ---- */
 
-/* Register/Unregister protocols.
- * hci_task_lock is used to ensure that no tasks are running. */
-int hci_register_proto(struct hci_proto *hp)
-{
-       int err = 0;
-
-       BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
-       if (hp->id >= HCI_MAX_PROTO)
-               return -EINVAL;
-
-       write_lock_bh(&hci_task_lock);
-
-       if (!hci_proto[hp->id])
-               hci_proto[hp->id] = hp;
-       else
-               err = -EEXIST;
-
-       write_unlock_bh(&hci_task_lock);
-
-       return err;
-}
-EXPORT_SYMBOL(hci_register_proto);
-
-int hci_unregister_proto(struct hci_proto *hp)
-{
-       int err = 0;
-
-       BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
-       if (hp->id >= HCI_MAX_PROTO)
-               return -EINVAL;
-
-       write_lock_bh(&hci_task_lock);
-
-       if (hci_proto[hp->id])
-               hci_proto[hp->id] = NULL;
-       else
-               err = -ENOENT;
-
-       write_unlock_bh(&hci_task_lock);
-
-       return err;
-}
-EXPORT_SYMBOL(hci_unregister_proto);
-
 int hci_register_cb(struct hci_cb *cb)
 {
        BT_DBG("%p name %s", cb, cb->name);
 
-       write_lock_bh(&hci_cb_list_lock);
+       write_lock(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
-       write_unlock_bh(&hci_cb_list_lock);
+       write_unlock(&hci_cb_list_lock);
 
        return 0;
 }
@@ -1849,9 +2110,9 @@ int hci_unregister_cb(struct hci_cb *cb)
 {
        BT_DBG("%p name %s", cb, cb->name);
 
-       write_lock_bh(&hci_cb_list_lock);
+       write_lock(&hci_cb_list_lock);
        list_del(&cb->list);
-       write_unlock_bh(&hci_cb_list_lock);
+       write_unlock(&hci_cb_list_lock);
 
        return 0;
 }
@@ -1912,7 +2173,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
                hdev->init_last_cmd = opcode;
 
        skb_queue_tail(&hdev->cmd_q, skb);
-       tasklet_schedule(&hdev->cmd_task);
+       queue_work(hdev->workqueue, &hdev->cmd_work);
 
        return 0;
 }
@@ -1948,23 +2209,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
        hdr->dlen   = cpu_to_le16(len);
 }
 
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+                               struct sk_buff *skb, __u16 flags)
 {
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;
 
-       BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
-
-       skb->dev = (void *) hdev;
-       bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-       hci_add_acl_hdr(skb, conn->handle, flags);
-
        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
 
-               skb_queue_tail(&conn->data_q, skb);
+               skb_queue_tail(queue, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1972,9 +2228,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
                skb_shinfo(skb)->frag_list = NULL;
 
                /* Queue all fragments atomically */
-               spin_lock_bh(&conn->data_q.lock);
+               spin_lock(&queue->lock);
 
-               __skb_queue_tail(&conn->data_q, skb);
+               __skb_queue_tail(queue, skb);
 
                flags &= ~ACL_START;
                flags |= ACL_CONT;
@@ -1987,13 +2243,27 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
 
                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
 
-                       __skb_queue_tail(&conn->data_q, skb);
+                       __skb_queue_tail(queue, skb);
                } while (list);
 
-               spin_unlock_bh(&conn->data_q.lock);
+               spin_unlock(&queue->lock);
        }
+}
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+       struct hci_conn *conn = chan->conn;
+       struct hci_dev *hdev = conn->hdev;
 
-       tasklet_schedule(&hdev->tx_task);
+       BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+
+       skb->dev = (void *) hdev;
+       bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+       hci_add_acl_hdr(skb, conn->handle, flags);
+
+       hci_queue_acl(conn, &chan->data_q, skb, flags);
+
+       queue_work(hdev->workqueue, &hdev->tx_work);
 }
 EXPORT_SYMBOL(hci_send_acl);
 
@@ -2016,7 +2286,7 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
 
        skb_queue_tail(&conn->data_q, skb);
-       tasklet_schedule(&hdev->tx_task);
+       queue_work(hdev->workqueue, &hdev->tx_work);
 }
 EXPORT_SYMBOL(hci_send_sco);
 
@@ -2026,16 +2296,15 @@ EXPORT_SYMBOL(hci_send_sco);
 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
-       struct hci_conn *conn = NULL;
+       struct hci_conn *conn = NULL, *c;
        int num = 0, min = ~0;
-       struct list_head *p;
 
        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
-       list_for_each(p, &h->list) {
-               struct hci_conn *c;
-               c = list_entry(p, struct hci_conn, list);
 
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;
 
@@ -2053,6 +2322,8 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
                        break;
        }
 
+       rcu_read_unlock();
+
        if (conn) {
                int cnt, q;
 
@@ -2084,53 +2355,270 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
-       struct list_head *p;
-       struct hci_conn  *c;
+       struct hci_conn *c;
 
        BT_ERR("%s link tx timeout", hdev->name);
 
+       rcu_read_lock();
+
        /* Kill stalled connections */
-       list_for_each(p, &h->list) {
-               c = list_entry(p, struct hci_conn, list);
+       list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
+
+       rcu_read_unlock();
 }
 
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+                                               int *quote)
 {
+       struct hci_conn_hash *h = &hdev->conn_hash;
+       struct hci_chan *chan = NULL;
+       int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
-       struct sk_buff *skb;
-       int quote;
+       int cnt, q, conn_num = 0;
 
        BT_DBG("%s", hdev->name);
 
-       if (!hci_conn_num(hdev, ACL_LINK))
-               return;
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(conn, &h->list, list) {
+               struct hci_chan *tmp;
+
+               if (conn->type != type)
+                       continue;
+
+               if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+                       continue;
+
+               conn_num++;
+
+               list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
+                       struct sk_buff *skb;
+
+                       if (skb_queue_empty(&tmp->data_q))
+                               continue;
+
+                       skb = skb_peek(&tmp->data_q);
+                       if (skb->priority < cur_prio)
+                               continue;
+
+                       if (skb->priority > cur_prio) {
+                               num = 0;
+                               min = ~0;
+                               cur_prio = skb->priority;
+                       }
+
+                       num++;
+
+                       if (conn->sent < min) {
+                               min  = conn->sent;
+                               chan = tmp;
+                       }
+               }
+
+               if (hci_conn_num(hdev, type) == conn_num)
+                       break;
+       }
+
+       rcu_read_unlock();
+
+       if (!chan)
+               return NULL;
+
+       switch (chan->conn->type) {
+       case ACL_LINK:
+               cnt = hdev->acl_cnt;
+               break;
+       case SCO_LINK:
+       case ESCO_LINK:
+               cnt = hdev->sco_cnt;
+               break;
+       case LE_LINK:
+               cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+               break;
+       default:
+               cnt = 0;
+               BT_ERR("Unknown link type");
+       }
+
+       q = cnt / num;
+       *quote = q ? q : 1;
+       BT_DBG("chan %p quote %d", chan, *quote);
+       return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+       struct hci_conn_hash *h = &hdev->conn_hash;
+       struct hci_conn *conn;
+       int num = 0;
 
+       BT_DBG("%s", hdev->name);
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(conn, &h->list, list) {
+               struct hci_chan *chan;
+
+               if (conn->type != type)
+                       continue;
+
+               if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+                       continue;
+
+               num++;
+
+               list_for_each_entry_rcu(chan, &conn->chan_list, list) {
+                       struct sk_buff *skb;
+
+                       if (chan->sent) {
+                               chan->sent = 0;
+                               continue;
+                       }
+
+                       if (skb_queue_empty(&chan->data_q))
+                               continue;
+
+                       skb = skb_peek(&chan->data_q);
+                       if (skb->priority >= HCI_PRIO_MAX - 1)
+                               continue;
+
+                       skb->priority = HCI_PRIO_MAX - 1;
+
+                       BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+                                                               skb->priority);
+               }
+
+               if (hci_conn_num(hdev, type) == num)
+                       break;
+       }
+
+       rcu_read_unlock();
+
+}
+
+static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       /* Calculate count of blocks used by this packet */
+       return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
+}
+
+static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
+{
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
-               if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
+               if (!cnt && time_after(jiffies, hdev->acl_last_tx +
+                                       msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
+}
 
-       while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
-               while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-                       BT_DBG("skb %p len %d", skb, skb->len);
+static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
+{
+       unsigned int cnt = hdev->acl_cnt;
+       struct hci_chan *chan;
+       struct sk_buff *skb;
+       int quote;
 
-                       hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+       __check_timeout(hdev, cnt);
+
+       while (hdev->acl_cnt &&
+                       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+               u32 priority = (skb_peek(&chan->data_q))->priority;
+               while (quote-- && (skb = skb_peek(&chan->data_q))) {
+                       BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+                                       skb->len, skb->priority);
+
+                       /* Stop if priority has changed */
+                       if (skb->priority < priority)
+                               break;
+
+                       skb = skb_dequeue(&chan->data_q);
+
+                       hci_conn_enter_active_mode(chan->conn,
+                                               bt_cb(skb)->force_active);
 
                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;
 
                        hdev->acl_cnt--;
-                       conn->sent++;
+                       chan->sent++;
+                       chan->conn->sent++;
+               }
+       }
+
+       if (cnt != hdev->acl_cnt)
+               hci_prio_recalculate(hdev, ACL_LINK);
+}
+
+static inline void hci_sched_acl_blk(struct hci_dev *hdev)
+{
+       unsigned int cnt = hdev->block_cnt;
+       struct hci_chan *chan;
+       struct sk_buff *skb;
+       int quote;
+
+       __check_timeout(hdev, cnt);
+
+       while (hdev->block_cnt > 0 &&
+                       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+               u32 priority = (skb_peek(&chan->data_q))->priority;
+               while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
+                       int blocks;
+
+                       BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+                                               skb->len, skb->priority);
+
+                       /* Stop if priority has changed */
+                       if (skb->priority < priority)
+                               break;
+
+                       skb = skb_dequeue(&chan->data_q);
+
+                       blocks = __get_blocks(hdev, skb);
+                       if (blocks > hdev->block_cnt)
+                               return;
+
+                       hci_conn_enter_active_mode(chan->conn,
+                                               bt_cb(skb)->force_active);
+
+                       hci_send_frame(skb);
+                       hdev->acl_last_tx = jiffies;
+
+                       hdev->block_cnt -= blocks;
+                       quote -= blocks;
+
+                       chan->sent += blocks;
+                       chan->conn->sent += blocks;
                }
        }
+
+       if (cnt != hdev->block_cnt)
+               hci_prio_recalculate(hdev, ACL_LINK);
+}
+
+static inline void hci_sched_acl(struct hci_dev *hdev)
+{
+       BT_DBG("%s", hdev->name);
+
+       if (!hci_conn_num(hdev, ACL_LINK))
+               return;
+
+       switch (hdev->flow_ctl_mode) {
+       case HCI_FLOW_CTL_MODE_PACKET_BASED:
+               hci_sched_acl_pkt(hdev);
+               break;
+
+       case HCI_FLOW_CTL_MODE_BLOCK_BASED:
+               hci_sched_acl_blk(hdev);
+               break;
+       }
 }
 
 /* Schedule SCO */
@@ -2182,9 +2670,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
 
 static inline void hci_sched_le(struct hci_dev *hdev)
 {
-       struct hci_conn *conn;
+       struct hci_chan *chan;
        struct sk_buff *skb;
-       int quote, cnt;
+       int quote, cnt, tmp;
 
        BT_DBG("%s", hdev->name);
 
@@ -2200,30 +2688,42 @@ static inline void hci_sched_le(struct hci_dev *hdev)
        }
 
        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
-       while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
-               while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-                       BT_DBG("skb %p len %d", skb, skb->len);
+       tmp = cnt;
+       while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+               u32 priority = (skb_peek(&chan->data_q))->priority;
+               while (quote-- && (skb = skb_peek(&chan->data_q))) {
+                       BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+                                       skb->len, skb->priority);
+
+                       /* Stop if priority has changed */
+                       if (skb->priority < priority)
+                               break;
+
+                       skb = skb_dequeue(&chan->data_q);
 
                        hci_send_frame(skb);
                        hdev->le_last_tx = jiffies;
 
                        cnt--;
-                       conn->sent++;
+                       chan->sent++;
+                       chan->conn->sent++;
                }
        }
+
        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;
+
+       if (cnt != tmp)
+               hci_prio_recalculate(hdev, LE_LINK);
 }
 
-static void hci_tx_task(unsigned long arg)
+static void hci_tx_work(struct work_struct *work)
 {
-       struct hci_dev *hdev = (struct hci_dev *) arg;
+       struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;
 
-       read_lock(&hci_task_lock);
-
        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
                hdev->sco_cnt, hdev->le_cnt);
 
@@ -2240,8 +2740,6 @@ static void hci_tx_task(unsigned long arg)
        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);
-
-       read_unlock(&hci_task_lock);
 }
 
 /* ----- HCI RX task (incoming data processing) ----- */
@@ -2268,16 +2766,11 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_unlock(hdev);
 
        if (conn) {
-               register struct hci_proto *hp;
-
-               hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+               hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
 
                /* Send to upper protocol */
-               hp = hci_proto[HCI_PROTO_L2CAP];
-               if (hp && hp->recv_acldata) {
-                       hp->recv_acldata(conn, skb, flags);
-                       return;
-               }
+               l2cap_recv_acldata(conn, skb, flags);
+               return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
@@ -2306,14 +2799,9 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_unlock(hdev);
 
        if (conn) {
-               register struct hci_proto *hp;
-
                /* Send to upper protocol */
-               hp = hci_proto[HCI_PROTO_SCO];
-               if (hp && hp->recv_scodata) {
-                       hp->recv_scodata(conn, skb);
-                       return;
-               }
+               sco_recv_scodata(conn, skb);
+               return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
@@ -2322,15 +2810,13 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
        kfree_skb(skb);
 }
 
-static void hci_rx_task(unsigned long arg)
+static void hci_rx_work(struct work_struct *work)
 {
-       struct hci_dev *hdev = (struct hci_dev *) arg;
+       struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;
 
        BT_DBG("%s", hdev->name);
 
-       read_lock(&hci_task_lock);
-
        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
@@ -2355,6 +2841,7 @@ static void hci_rx_task(unsigned long arg)
                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
+                       BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;
 
@@ -2373,13 +2860,11 @@ static void hci_rx_task(unsigned long arg)
                        break;
                }
        }
-
-       read_unlock(&hci_task_lock);
 }
 
-static void hci_cmd_task(unsigned long arg)
+static void hci_cmd_work(struct work_struct *work)
 {
-       struct hci_dev *hdev = (struct hci_dev *) arg;
+       struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;
 
        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
@@ -2403,7 +2888,40 @@ static void hci_cmd_task(unsigned long arg)
                                  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
-                       tasklet_schedule(&hdev->cmd_task);
+                       queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
 }
+
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+       /* General inquiry access code (GIAC) */
+       u8 lap[3] = { 0x33, 0x8b, 0x9e };
+       struct hci_cp_inquiry cp;
+
+       BT_DBG("%s", hdev->name);
+
+       if (test_bit(HCI_INQUIRY, &hdev->flags))
+               return -EINPROGRESS;
+
+       inquiry_cache_flush(hdev);
+
+       memset(&cp, 0, sizeof(cp));
+       memcpy(&cp.lap, lap, sizeof(cp.lap));
+       cp.length  = length;
+
+       return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+       BT_DBG("%s", hdev->name);
+
+       if (!test_bit(HCI_INQUIRY, &hdev->flags))
+               return -EPERM;
+
+       return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}
+
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed");
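
The new enable_hs flag (declared near the top of this diff and exposed here as a writable module parameter) is what lets non-BR/EDR controllers, i.e. AMP controllers, be brought up as regular HCI devices instead of being forced into raw mode in hci_dev_open(). Since hci_core.c is built into bluetooth.ko and the parameter permissions are 0644, it can presumably be set at load time ("modprobe bluetooth enable_hs=1") or toggled later through /sys/module/bluetooth/parameters/enable_hs; both forms are usage assumptions based on standard module_param behaviour, not something stated in the patch itself.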