Bluetooth: Fix stand-alone HCI command handling
[GitHub/mt8127/android_kernel_alcatel_ttab.git] net/bluetooth/hci_core.c
index 0f78e34220c9025aae08f3b38b49924b6c397ff6..4f8142bdf6558c8f791c5129b8332d25c03b73f1 100644
@@ -107,9 +107,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
 }
 
 /* Execute request and wait for completion. */
-static int __hci_request(struct hci_dev *hdev,
-                        void (*req)(struct hci_dev *hdev, unsigned long opt),
-                        unsigned long opt, __u32 timeout)
+static int __hci_req_sync(struct hci_dev *hdev,
+                         void (*req)(struct hci_dev *hdev, unsigned long opt),
+                         unsigned long opt, __u32 timeout)
 {
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;
@@ -122,6 +122,14 @@ static int __hci_request(struct hci_dev *hdev,
        set_current_state(TASK_INTERRUPTIBLE);
 
        req(hdev, opt);
+
+       /* If the request didn't send any commands (nothing was queued and
+        * no command is still outstanding) there is nothing to wait for,
+        * so return immediately.
+        */
+       if (skb_queue_empty(&hdev->cmd_q) && atomic_read(&hdev->cmd_cnt)) {
+               hdev->req_status = 0;
+               remove_wait_queue(&hdev->req_wait_q, &wait);
+               return err;
+       }
+
        schedule_timeout(timeout);
 
        remove_wait_queue(&hdev->req_wait_q, &wait);
@@ -150,9 +158,9 @@ static int __hci_request(struct hci_dev *hdev,
        return err;
 }
 
-static int hci_request(struct hci_dev *hdev,
-                      void (*req)(struct hci_dev *hdev, unsigned long opt),
-                      unsigned long opt, __u32 timeout)
+static int hci_req_sync(struct hci_dev *hdev,
+                       void (*req)(struct hci_dev *hdev, unsigned long opt),
+                       unsigned long opt, __u32 timeout)
 {
        int ret;
 
@@ -161,7 +169,7 @@ static int hci_request(struct hci_dev *hdev,
 
        /* Serialize all requests */
        hci_req_lock(hdev);
-       ret = __hci_request(hdev, req, opt, timeout);
+       ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);
 
        return ret;
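
The early return added to __hci_req_sync() above is needed because a request callback may legitimately queue no commands at all; hci_init3_req() introduced later in this patch does exactly that when the controller supports neither the default link policy command nor LE. Without the check, the caller would sleep for the whole timeout even though no completion will ever arrive. A minimal sketch of such a conditional callback, with hypothetical names, patterned on the init callbacks below:

        /* Hypothetical callback: only queues a command when the controller
         * is LE capable, so hdev->cmd_q may remain empty.
         */
        static void example_le_req(struct hci_dev *hdev, unsigned long opt)
        {
                if (lmp_le_capable(hdev))
                        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
        }

        /* Synchronous caller; returns 0 right away when nothing was queued:
         *      err = __hci_req_sync(hdev, example_le_req, 0, HCI_INIT_TIMEOUT);
         */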
@@ -185,6 +193,9 @@ static void bredr_init(struct hci_dev *hdev)
 
        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+       /* Read BD Address */
+       hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
 }
 
 static void amp_init(struct hci_dev *hdev)
@@ -201,7 +212,7 @@ static void amp_init(struct hci_dev *hdev)
        hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
 }
 
-static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_init1_req(struct hci_dev *hdev, unsigned long opt)
 {
        struct sk_buff *skb;
 
@@ -238,6 +249,273 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        }
 }
 
+static void bredr_setup(struct hci_dev *hdev)
+{
+       struct hci_cp_delete_stored_link_key cp;
+       __le16 param;
+       __u8 flt_type;
+
+       /* Read Buffer Size (ACL mtu, max pkt, etc.) */
+       hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
+
+       /* Read Class of Device */
+       hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
+
+       /* Read Local Name */
+       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
+
+       /* Read Voice Setting */
+       hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+
+       /* Clear Event Filters */
+       flt_type = HCI_FLT_CLEAR_ALL;
+       hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
+
+       /* Connection accept timeout ~20 secs */
+       param = __constant_cpu_to_le16(0x7d00);
+       hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+       bacpy(&cp.bdaddr, BDADDR_ANY);
+       cp.delete_all = 0x01;
+       hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
+}
+
+static void le_setup(struct hci_dev *hdev)
+{
+       /* Read LE Buffer Size */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
+
+       /* Read LE Local Supported Features */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
+
+       /* Read LE Advertising Channel TX Power */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
+
+       /* Read LE White List Size */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
+
+       /* Read LE Supported States */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
+}
+
+static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
+{
+       if (lmp_ext_inq_capable(hdev))
+               return 0x02;
+
+       if (lmp_inq_rssi_capable(hdev))
+               return 0x01;
+
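+       /* The controllers below (matched by manufacturer id and firmware
+        * revision) handle Inquiry Result with RSSI even though they do
+        * not advertise the capability, so force inquiry mode 0x01.
+        */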
+       if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
+           hdev->lmp_subver == 0x0757)
+               return 0x01;
+
+       if (hdev->manufacturer == 15) {
+               if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
+                       return 0x01;
+               if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
+                       return 0x01;
+               if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
+                       return 0x01;
+       }
+
+       if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
+           hdev->lmp_subver == 0x1805)
+               return 0x01;
+
+       return 0x00;
+}
+
+static void hci_setup_inquiry_mode(struct hci_dev *hdev)
+{
+       u8 mode;
+
+       mode = hci_get_inquiry_mode(hdev);
+
+       hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+}
+
+static void hci_setup_event_mask(struct hci_dev *hdev)
+{
+       /* The second byte is 0xff instead of 0x9f (two reserved bits
+        * disabled) since a Broadcom 1.2 dongle doesn't respond to the
+        * command otherwise.
+        */
+       u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+       /* CSR 1.1 dongles do not accept any bitfield so don't try to set
+        * any event mask for pre 1.2 devices.
+        */
+       if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+               return;
+
+       if (lmp_bredr_capable(hdev)) {
+               events[4] |= 0x01; /* Flow Specification Complete */
+               events[4] |= 0x02; /* Inquiry Result with RSSI */
+               events[4] |= 0x04; /* Read Remote Extended Features Complete */
+               events[5] |= 0x08; /* Synchronous Connection Complete */
+               events[5] |= 0x10; /* Synchronous Connection Changed */
+       }
+
+       if (lmp_inq_rssi_capable(hdev))
+               events[4] |= 0x02; /* Inquiry Result with RSSI */
+
+       if (lmp_sniffsubr_capable(hdev))
+               events[5] |= 0x20; /* Sniff Subrating */
+
+       if (lmp_pause_enc_capable(hdev))
+               events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+       if (lmp_ext_inq_capable(hdev))
+               events[5] |= 0x40; /* Extended Inquiry Result */
+
+       if (lmp_no_flush_capable(hdev))
+               events[7] |= 0x01; /* Enhanced Flush Complete */
+
+       if (lmp_lsto_capable(hdev))
+               events[6] |= 0x80; /* Link Supervision Timeout Changed */
+
+       if (lmp_ssp_capable(hdev)) {
+               events[6] |= 0x01;      /* IO Capability Request */
+               events[6] |= 0x02;      /* IO Capability Response */
+               events[6] |= 0x04;      /* User Confirmation Request */
+               events[6] |= 0x08;      /* User Passkey Request */
+               events[6] |= 0x10;      /* Remote OOB Data Request */
+               events[6] |= 0x20;      /* Simple Pairing Complete */
+               events[7] |= 0x04;      /* User Passkey Notification */
+               events[7] |= 0x08;      /* Keypress Notification */
+               events[7] |= 0x10;      /* Remote Host Supported
+                                        * Features Notification
+                                        */
+       }
+
+       if (lmp_le_capable(hdev))
+               events[7] |= 0x20;      /* LE Meta-Event */
+
+       hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
+
+       if (lmp_le_capable(hdev)) {
+               memset(events, 0, sizeof(events));
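+               /* 0x1f enables the first five LE meta events: Connection
+                * Complete, Advertising Report, Connection Update Complete,
+                * Read Remote Used Features Complete and Long Term Key
+                * Request.
+                */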
+               events[0] = 0x1f;
+               hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
+                            sizeof(events), events);
+       }
+}
+
+static void hci_init2_req(struct hci_dev *hdev, unsigned long opt)
+{
+       if (lmp_bredr_capable(hdev))
+               bredr_setup(hdev);
+
+       if (lmp_le_capable(hdev))
+               le_setup(hdev);
+
+       hci_setup_event_mask(hdev);
+
+       if (hdev->hci_ver > BLUETOOTH_VER_1_1)
+               hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+       if (lmp_ssp_capable(hdev)) {
+               if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+                       u8 mode = 0x01;
+                       hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
+                                    sizeof(mode), &mode);
+               } else {
+                       struct hci_cp_write_eir cp;
+
+                       memset(hdev->eir, 0, sizeof(hdev->eir));
+                       memset(&cp, 0, sizeof(cp));
+
+                       hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+               }
+       }
+
+       if (lmp_inq_rssi_capable(hdev))
+               hci_setup_inquiry_mode(hdev);
+
+       if (lmp_inq_tx_pwr_capable(hdev))
+               hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+
+       if (lmp_ext_feat_capable(hdev)) {
+               struct hci_cp_read_local_ext_features cp;
+
+               cp.page = 0x01;
+               hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
+                            &cp);
+       }
+
+       if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
+               u8 enable = 1;
+               hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
+                            &enable);
+       }
+}
+
+static void hci_setup_link_policy(struct hci_dev *hdev)
+{
+       struct hci_cp_write_def_link_policy cp;
+       u16 link_policy = 0;
+
+       if (lmp_rswitch_capable(hdev))
+               link_policy |= HCI_LP_RSWITCH;
+       if (lmp_hold_capable(hdev))
+               link_policy |= HCI_LP_HOLD;
+       if (lmp_sniff_capable(hdev))
+               link_policy |= HCI_LP_SNIFF;
+       if (lmp_park_capable(hdev))
+               link_policy |= HCI_LP_PARK;
+
+       cp.policy = cpu_to_le16(link_policy);
+       hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
+}
+
+static void hci_set_le_support(struct hci_dev *hdev)
+{
+       struct hci_cp_write_le_host_supported cp;
+
+       memset(&cp, 0, sizeof(cp));
+
+       if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+               cp.le = 0x01;
+               cp.simul = lmp_le_br_capable(hdev);
+       }
+
+       if (cp.le != lmp_host_le_capable(hdev))
+               hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
+                            &cp);
+}
+
+static void hci_init3_req(struct hci_dev *hdev, unsigned long opt)
+{
+       if (hdev->commands[5] & 0x10)
+               hci_setup_link_policy(hdev);
+
+       if (lmp_le_capable(hdev))
+               hci_set_le_support(hdev);
+}
+
+static int __hci_init(struct hci_dev *hdev)
+{
+       int err;
+
+       err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
+       if (err < 0)
+               return err;
+
+       /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
+        * dual-mode BR/EDR/LE controllers. AMP controllers only need
+        * the first stage init.
+        */
+       if (hdev->dev_type != HCI_BREDR)
+               return 0;
+
+       err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
+       if (err < 0)
+               return err;
+
+       return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+}
+
 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
 {
        __u8 scan = opt;
@@ -556,7 +834,8 @@ int hci_inquiry(void __user *arg)
        timeo = ir.length * msecs_to_jiffies(2000);
 
        if (do_inquiry) {
-               err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
+               err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
+                                  timeo);
                if (err < 0)
                        goto done;
        }
@@ -737,7 +1016,7 @@ int hci_dev_open(__u16 dev)
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;
 
-               ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
+               ret = __hci_init(hdev);
 
                clear_bit(HCI_INIT, &hdev->flags);
        }
@@ -828,7 +1107,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
-               __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
+               __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }
 
@@ -921,7 +1200,7 @@ int hci_dev_reset(__u16 dev)
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
 
        if (!test_bit(HCI_RAW, &hdev->flags))
-               ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
+               ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 
 done:
        hci_req_unlock(hdev);
@@ -960,8 +1239,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 
        switch (cmd) {
        case HCISETAUTH:
-               err = hci_request(hdev, hci_auth_req, dr.dev_opt,
-                                 HCI_INIT_TIMEOUT);
+               err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+                                  HCI_INIT_TIMEOUT);
                break;
 
        case HCISETENCRYPT:
@@ -972,24 +1251,24 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 
                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
-                       err = hci_request(hdev, hci_auth_req, dr.dev_opt,
-                                         HCI_INIT_TIMEOUT);
+                       err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+                                          HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }
 
-               err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
-                                 HCI_INIT_TIMEOUT);
+               err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
+                                  HCI_INIT_TIMEOUT);
                break;
 
        case HCISETSCAN:
-               err = hci_request(hdev, hci_scan_req, dr.dev_opt,
-                                 HCI_INIT_TIMEOUT);
+               err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
+                                  HCI_INIT_TIMEOUT);
                break;
 
        case HCISETLINKPOL:
-               err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
-                                 HCI_INIT_TIMEOUT);
+               err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
+                                  HCI_INIT_TIMEOUT);
                break;
 
        case HCISETLINKMODE:
@@ -1146,7 +1425,8 @@ static void hci_power_on(struct work_struct *work)
                return;
 
        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-               schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
+               queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+                                  HCI_AUTO_OFF_TIMEOUT);
 
        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
@@ -1182,14 +1462,10 @@ static void hci_discov_off(struct work_struct *work)
 
 int hci_uuids_clear(struct hci_dev *hdev)
 {
-       struct list_head *p, *n;
-
-       list_for_each_safe(p, n, &hdev->uuids) {
-               struct bt_uuid *uuid;
-
-               uuid = list_entry(p, struct bt_uuid, list);
+       struct bt_uuid *uuid, *tmp;
 
-               list_del(p);
+       list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
+               list_del(&uuid->list);
                kfree(uuid);
        }
 
@@ -1611,18 +1887,18 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
 
        hci_req_lock(hdev);
 
-       err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
-                           timeo);
+       err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
+                            timeo);
        if (!err)
-               err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
+               err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
 
        hci_req_unlock(hdev);
 
        if (err < 0)
                return err;
 
-       schedule_delayed_work(&hdev->le_scan_disable,
-                             msecs_to_jiffies(timeout));
+       queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
+                          msecs_to_jiffies(timeout));
 
        return 0;
 }
@@ -1799,6 +2075,15 @@ int hci_register_dev(struct hci_dev *hdev)
                goto err;
        }
 
+       hdev->req_workqueue = alloc_workqueue(hdev->name,
+                                             WQ_HIGHPRI | WQ_UNBOUND |
+                                             WQ_MEM_RECLAIM, 1);
+       if (!hdev->req_workqueue) {
+               destroy_workqueue(hdev->workqueue);
+               error = -ENOMEM;
+               goto err;
+       }
+
        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;
@@ -1821,12 +2106,13 @@ int hci_register_dev(struct hci_dev *hdev)
        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);
 
-       schedule_work(&hdev->power_on);
+       queue_work(hdev->req_workqueue, &hdev->power_on);
 
        return id;
 
 err_wqueue:
        destroy_workqueue(hdev->workqueue);
+       destroy_workqueue(hdev->req_workqueue);
 err:
        ida_simple_remove(&hci_index_ida, hdev->id);
        write_lock(&hci_dev_list_lock);
@@ -1880,6 +2166,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
        hci_del_sysfs(hdev);
 
        destroy_workqueue(hdev->workqueue);
+       destroy_workqueue(hdev->req_workqueue);
 
        hci_dev_lock(hdev);
        hci_blacklist_clear(hdev);
@@ -1921,7 +2208,7 @@ int hci_recv_frame(struct sk_buff *skb)
                return -ENXIO;
        }
 
-       /* Incomming skb */
+       /* Incoming skb */
        bt_cb(skb)->incoming = 1;
 
        /* Time stamp */
@@ -2152,20 +2439,46 @@ static int hci_send_frame(struct sk_buff *skb)
        return hdev->send(skb);
 }
 
-/* Send HCI command */
-int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
+void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
+{
+       skb_queue_head_init(&req->cmd_q);
+       req->hdev = hdev;
+}
+
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct sk_buff *skb;
+       unsigned long flags;
+
+       BT_DBG("length %u", skb_queue_len(&req->cmd_q));
+
+       /* Do not allow empty requests */
+       if (skb_queue_empty(&req->cmd_q))
+               return -EINVAL;
+
+       skb = skb_peek_tail(&req->cmd_q);
+       bt_cb(skb)->req.complete = complete;
+
+       spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+       skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
+       spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+
+       queue_work(hdev->workqueue, &hdev->cmd_work);
+
+       return 0;
+}
+
+static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
+                                      u32 plen, void *param)
 {
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;
 
-       BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
-
        skb = bt_skb_alloc(len, GFP_ATOMIC);
-       if (!skb) {
-               BT_ERR("%s no memory for command", hdev->name);
-               return -ENOMEM;
-       }
+       if (!skb)
+               return NULL;
 
        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
@@ -2179,15 +2492,58 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
 
+       return skb;
+}
+
+/* Send HCI command */
+int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
+{
+       struct sk_buff *skb;
+
+       BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+
+       skb = hci_prepare_cmd(hdev, opcode, plen, param);
+       if (!skb) {
+               BT_ERR("%s no memory for command", hdev->name);
+               return -ENOMEM;
+       }
+
        if (test_bit(HCI_INIT, &hdev->flags))
                hdev->init_last_cmd = opcode;
 
+       /* Stand-alone HCI commands must be flagged as
+        * single-command requests.
+        */
+       bt_cb(skb)->req.start = true;
+
        skb_queue_tail(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
 
        return 0;
 }
 
+/* Queue a command to an asynchronous HCI request */
+int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct sk_buff *skb;
+
+       BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+
+       skb = hci_prepare_cmd(hdev, opcode, plen, param);
+       if (!skb) {
+               BT_ERR("%s no memory for command", hdev->name);
+               return -ENOMEM;
+       }
+
+       if (skb_queue_empty(&req->cmd_q))
+               bt_cb(skb)->req.start = true;
+
+       skb_queue_tail(&req->cmd_q, skb);
+
+       return 0;
+}
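
Taken together, hci_req_init(), hci_req_add() and hci_req_run() let callers batch several commands into a single request whose final command triggers one completion callback. A rough usage sketch follows; the example_* names are hypothetical, and the hci_req_complete_t signature (struct hci_dev *, u8 status) is assumed from hci_core.h rather than shown in this diff:

        static void example_complete(struct hci_dev *hdev, u8 status)
        {
                BT_DBG("%s status 0x%2.2x", hdev->name, status);
        }

        static int example_send_request(struct hci_dev *hdev)
        {
                struct hci_request req;
                u8 scan = SCAN_PAGE | SCAN_INQUIRY;

                hci_req_init(&req, hdev);

                /* Both commands land in req.cmd_q; only the first skb is
                 * marked with req.start = true, delimiting the request.
                 */
                hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
                hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

                /* Splices the queued commands onto hdev->cmd_q and schedules
                 * cmd_work; example_complete() is attached to the final
                 * command.  An empty request returns -EINVAL.
                 */
                return hci_req_run(&req, example_complete);
        }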
+
 /* Get data from the previously sent command */
 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
 {
@@ -2390,7 +2746,7 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %pMR",
                               hdev->name, &c->dst);
-                       hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
+                       hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
                }
        }