	struct work_struct	power_off;
	struct timer_list	off_timer;
+	__u16			discov_timeout;
+	struct delayed_work	discov_off;
+
	struct timer_list	cmd_timer;
	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);
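+	/* Stop any pending discoverable timeout before the device goes down */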
+	if (hdev->discov_timeout > 0) {
+		cancel_delayed_work_sync(&hdev->discov_off);
+		hdev->discov_timeout = 0;
+	}
+
	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	del_timer(&hdev->off_timer);
}
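+/*
+ * Delayed work handler: when the discoverable timeout expires, write the
+ * scan enable setting back to page scan only (still connectable, no longer
+ * discoverable) and clear the stored timeout.
+ */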
+static void hci_discov_off(struct work_struct *work)
+{
+	struct hci_dev *hdev;
+	u8 scan = SCAN_PAGE;
+
+	hdev = container_of(work, struct hci_dev, discov_off.work);
+
+	BT_DBG("%s", hdev->name);
+
+	hci_dev_lock_bh(hdev);
+
+	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
+
+	hdev->discov_timeout = 0;
+
+	hci_dev_unlock_bh(hdev);
+}
+
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+
	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
	atomic_set(&hdev->promisc, 0);
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev->id, 1);
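+		/* discov_timeout is in seconds; arm the delayed auto-off work */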
+		if (hdev->discov_timeout > 0) {
+			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+									to);
+		}
	} else if (old_iscan)
		mgmt_discoverable(hdev->id, 0);
static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
-	struct mgmt_mode *cp;
+	struct mgmt_cp_set_discoverable *cp;
	struct hci_dev *hdev;
	struct pending_cmd *cmd;
	u8 scan;
	if (cp->val)
		scan |= SCAN_INQUIRY;
+	else
+		cancel_delayed_work_sync(&hdev->discov_off);
	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	if (err < 0)
		mgmt_pending_remove(cmd);
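+	/* Remember the requested timeout (in seconds); 0 means no auto-off */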
+	if (cp->val)
+		hdev->discov_timeout = get_unaligned_le16(&cp->timeout);
+
failed:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
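
For reference, the handler above assumes the Set Discoverable command carries a
one-byte mode flag followed by a 16-bit little-endian timeout in seconds. A
minimal sketch of that layout (inferred from the cp->val and cp->timeout
accesses and the get_unaligned_le16() read, not quoted from the mgmt header):

struct mgmt_cp_set_discoverable {
	__u8	val;		/* 0 = not discoverable, non-zero = discoverable */
	__le16	timeout;	/* seconds until auto-off, 0 = no timeout */
} __packed;			/* assumed packed, hence the unaligned 16-bit read */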