[COMMON] scsi: ufs: exynos: fix build error for kernel 4.14
author: hgchu <hg.chu@samsung.com>
Fri, 12 Jan 2018 11:42:57 +0000 (20:42 +0900)
committer: JaeHun Jung <jh0801.jung@samsung.com>
Tue, 8 May 2018 08:20:55 +0000 (17:20 +0900)
Change-Id: Id79757849bb55c0b654105231335293e5fcd9f04
Signed-off-by: hgchu <hg.chu@samsung.com>
drivers/scsi/scsi_sysfs.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h

index 0ffb333d1c6ab211adfed4108ac953dc895c6cf0..528ac9070efed6c11b949535658f5a5428127480 100644 (file)
@@ -1237,6 +1237,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 
        transport_configure_device(&starget->dev);
 
+       device_enable_async_suspend(&sdev->sdev_gendev);
        scsi_autopm_get_target(starget);
        pm_runtime_set_active(&sdev->sdev_gendev);
        pm_runtime_forbid(&sdev->sdev_gendev);
@@ -1261,6 +1262,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
                return error;
        }
 
+       device_enable_async_suspend(&sdev->sdev_dev);
        error = device_add(&sdev->sdev_dev);
        if (error) {
                sdev_printk(KERN_INFO, sdev,
index 62c08f9bbbc6a0a2f2d5860fcd186ff00f81062a..d430722f7cadc95c075f9791345836233eb9e84e 100644 (file)
@@ -35,7 +35,8 @@
 config SCSI_UFSHCD
        tristate "Universal Flash Storage Controller Driver Core"
        depends on SCSI && SCSI_DMA
-
+       select PM_DEVFREQ
+       select DEVFREQ_GOV_SIMPLE_ONDEMAND
        select NLS
        ---help---
        This selects the support for UFS devices in Linux, say Y and make
index b1ed0f6e87744d2fc1f54182f8b412531794ade4..5bb632d8f608b9385236ebfc43d834c952a09062 100644 (file)
 #include <linux/devfreq.h>
 #endif
 #include <linux/nls.h>
+#include <linux/smc.h>
 #include <scsi/ufs/ioctl.h>
 #include <linux/of.h>
+#include <linux/blkdev.h>
 #include "ufshcd.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
 #define QUERY_REQ_RETRIES 3
 /* Query request timeout */
 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+/*
+ * Query request timeout for fDeviceInit flag
+ * The fDeviceInit query response time for some devices can be so large that
+ * the default QUERY_REQ_TIMEOUT may not be enough for such devices.
+ */
+#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
 
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT 300 /* msecs */
@@ -737,7 +745,16 @@ static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
  */
 static inline int ufshcd_get_lists_status(u32 reg)
 {
-       return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
+       /*
+        * The mask 0xFF is for the following HCS register bits
+        * Bit          Description
+        *  0           Device Present
+        *  1           UTRLRDY
+        *  2           UTMRLRDY
+        *  3           UCRDY
+        * 4-7          reserved
+        */
+       return ((reg & 0xFF) >> 1) ^ 0x07;
 }
 
 /**
@@ -1531,21 +1548,6 @@ start:
                if (async)
                        hba->clk_gating.active_reqs--;
        case CLKS_ON:
-               /*
-                * Wait for the ungate work to complete if in progress.
-                * Though the clocks may be in ON state, the link could
-                * still be in hibner8 state if hibern8 is allowed
-                * during clock gating.
-                * Make sure we exit hibern8 state also in addition to
-                * clocks being ON.
-                */
-               if (ufshcd_can_hibern8_during_gating(hba) &&
-                   ufshcd_is_link_hibern8(hba)) {
-                       spin_unlock_irqrestore(hba->host->host_lock, flags);
-                       flush_work(&hba->clk_gating.ungate_work);
-                       spin_lock_irqsave(hba->host->host_lock, flags);
-                       goto start;
-               }
                break;
        case REQ_CLKS_OFF:
                if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -1636,6 +1638,7 @@ static void ufshcd_gate_work(struct work_struct *work)
                        hba->clk_gating.state = CLKS_ON;
                        spin_unlock_irqrestore(hba->host->host_lock, flags);
                        hba->clk_gating.is_suspended = false;
+                       scsi_unblock_requests(hba->host);
                        trace_ufshcd_clk_gating(dev_name(hba->dev),
                                                hba->clk_gating.state);
                        goto out;
@@ -2453,6 +2456,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
        if (!down_read_trylock(&hba->clk_scaling_lock))
                return SCSI_MLQUEUE_HOST_BUSY;
+
        if ((ufs_shutdown_state == 1) && (cmd->cmnd[0] == START_STOP)) {
                scsi_block_requests(hba->host);
                cancel_work_sync(&hba->clk_gating.ungate_work);
@@ -2742,6 +2746,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
                if (!ufshcd_is_link_active(hba))
                        return -EPERM;
        }
+
        down_read(&hba->clk_scaling_lock);
 
        /*
@@ -2872,6 +2877,9 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
                goto out_unlock;
        }
 
+       if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
+               timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
+
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
 
        if (err) {
@@ -3922,8 +3930,6 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
        struct uic_command uic_cmd = {0};
        ktime_t start = ktime_get();
 
-       ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
-
        uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
        trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
@@ -3939,9 +3945,7 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
                 */
                if (ufshcd_link_recovery(hba))
                        ret = -ENOLINK;
-       } else
-               ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
-                                                               POST_CHANGE);
+       }
 
        return ret;
 }
@@ -3965,7 +3969,6 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
        int ret;
        ktime_t start = ktime_get();
 
-       ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
 
        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
@@ -3977,8 +3980,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
                        __func__, ret);
                ret = ufshcd_link_recovery(hba);
        } else {
-               ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
-                                                               POST_CHANGE);
+
                hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
                hba->ufs_stats.hibern8_exit_cnt++;
        }
@@ -4014,6 +4016,7 @@ static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en)
        else
                ret = ufshcd_uic_hibern8_exit(hba);
 
+
        if (ret || (hba->saved_err & INT_FATAL_ERRORS) ||
                ((hba->saved_err & UIC_ERROR) &&
                ((hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
@@ -4033,6 +4036,7 @@ static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en)
 
        if (hba->vops && hba->vops->hibern8_notify)
                hba->vops->hibern8_notify(hba, en, POST_CHANGE);
+
 out:
        hba->tcx_replay_timer_expired_cnt = 0;
        hba->fcx_protection_timer_expired_cnt = 0;
@@ -4066,6 +4070,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
                                __func__,
                                pwr_info->lane_rx,
                                pwr_info->lane_tx);
+               return -EINVAL;
+       }
 
        hba->tcx_replay_timer_expired_cnt = 0;
        hba->fcx_protection_timer_expired_cnt = 0;
@@ -4184,6 +4190,7 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
                        sizeof(struct ufs_pa_layer_attr));
        }
 
+out:
        return ret;
 }
 
@@ -4450,8 +4457,10 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
 
        if (hba->vops && hba->vops->host_reset)
                hba->vops->host_reset(hba);
+
        if (hba->quirks & UFSHCD_QUIRK_USE_OF_HCE) {
                ufshcd_set_link_off(hba);
+
                /* enable UIC related interrupts */
                ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
 
@@ -4971,6 +4980,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, int reason,
                        exynos_ufs_cmd_log_end(hba, index);
 #endif
                        __ufshcd_release(hba);
+
                        if (hba->monitor.flag & UFSHCD_MONITOR_LEVEL1)
                                dev_info(hba->dev, "Transfer Done(%d)\n",
                                                index);
@@ -5532,9 +5542,9 @@ skip_pending_xfer_clear:
                        spin_lock_irqsave(hba->host->host_lock, flags);
                        hba->ufshcd_state = UFSHCD_STATE_ERROR;
                        spin_unlock_irqrestore(hba->host->host_lock, flags);
+
                        dev_err(hba->dev, "%s: reset and restore failed\n",
                                        __func__);
-                       hba->ufshcd_state = UFSHCD_STATE_ERROR;
                }
                hba->saved_err = 0;
                hba->saved_uic_err = 0;
@@ -5832,8 +5842,6 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
        task_req_upiup->input_param1 = cpu_to_be32(lun_id);
        task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
-       ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
-
        /* send command to the controller */
        if (hba->vops && hba->vops->set_nexus_t_task_mgmt)
                hba->vops->set_nexus_t_task_mgmt(hba, free_slot, tm_function);
@@ -6008,6 +6016,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        }
 
        ufshcd_hold(hba, false);
+
        /* Dump debugging information to system memory */
        ufshcd_vops_dbg_register_dump(hba);
        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -6605,76 +6614,6 @@ out:
        return ret;
 }
 
-/**
- * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
- * less than device PA_TACTIVATE time.
- * @hba: per-adapter instance
- *
- * Some UFS devices require host PA_TACTIVATE to be lower than device
- * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
- * for such devices.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
-{
-       int ret = 0;
-       u32 granularity, peer_granularity;
-       u32 pa_tactivate, peer_pa_tactivate;
-       u32 pa_tactivate_us, peer_pa_tactivate_us;
-       u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
-
-       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
-                                 &granularity);
-       if (ret)
-               goto out;
-
-       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
-                                 &peer_granularity);
-       if (ret)
-               goto out;
-
-       if ((granularity < PA_GRANULARITY_MIN_VAL) ||
-           (granularity > PA_GRANULARITY_MAX_VAL)) {
-               dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
-                       __func__, granularity);
-               return -EINVAL;
-       }
-
-       if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
-           (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
-               dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
-                       __func__, peer_granularity);
-               return -EINVAL;
-       }
-
-       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
-       if (ret)
-               goto out;
-
-       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
-                                 &peer_pa_tactivate);
-       if (ret)
-               goto out;
-
-       pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
-       peer_pa_tactivate_us = peer_pa_tactivate *
-                            gran_to_us_table[peer_granularity - 1];
-
-       if (pa_tactivate_us > peer_pa_tactivate_us) {
-               u32 new_peer_pa_tactivate;
-
-               new_peer_pa_tactivate = pa_tactivate_us /
-                                     gran_to_us_table[peer_granularity - 1];
-               new_peer_pa_tactivate++;
-               ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
-                                         new_peer_pa_tactivate);
-       }
-
-out:
-       return ret;
-}
-
 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 {
        if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
@@ -6686,10 +6625,7 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
                /* set 1ms timeout for PA_TACTIVATE */
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
 
-       if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
-               ufshcd_quirk_tune_host_pa_tactivate(hba);
 
-       ufshcd_vops_apply_dev_quirks(hba);
 }
 
 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -7258,6 +7194,7 @@ static struct scsi_host_template ufshcd_driver_template = {
        .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
        .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
        .eh_timed_out           = ufshcd_eh_timed_out,
+       .ioctl                  = ufshcd_ioctl,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
@@ -7542,9 +7479,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
                }
        }
 
-       ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
-       if (ret)
-               return ret;
+       ret = ufshcd_vops_setup_clocks(hba, on);
 
 out:
        if (ret) {
@@ -8059,6 +7994,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 disable_clks:
 
+
        /*
         * Flush pending works before clock is disabled
         */
@@ -8104,6 +8040,11 @@ set_link_active:
 
        if (ufshcd_is_shutdown_pm(pm_op))
                goto out;
+
+       ret = ufshcd_enable_irq(hba);
+       if (ret)
+               goto out;
+
        if (ufshcd_is_link_hibern8(hba)) {
                ufshcd_set_link_trans_active(hba);
                if (!ufshcd_link_hibern8_ctrl(hba, false))
@@ -8180,6 +8121,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                if (ret)
                        goto disable_vreg;
        }
+
+       /* enable the host irq as host controller would be active soon */
+       ret = ufshcd_enable_irq(hba);
+       if (ret)
+               goto disable_irq_and_vops_clks;
+
        if (ufshcd_is_link_hibern8(hba)) {
                ufshcd_set_link_trans_active(hba);
                ret = ufshcd_link_hibern8_ctrl(hba, false);
@@ -8242,7 +8189,9 @@ disable_irq_and_vops_clks:
        ufshcd_disable_irq(hba);
        if (hba->clk_scaling.is_allowed)
                ufshcd_suspend_clkscaling(hba);
-       ufshcd_setup_clocks(hba, false);
+
+       if (gating_allowed)
+               ufshcd_setup_clocks(hba, false);
 disable_vreg:
        ufshcd_vreg_set_lpm(hba);
 out:
@@ -8822,8 +8771,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
        return 0;
 
-out_remove_scsi_host:
-       scsi_remove_host(hba->host);
 exit_gating:
        ufshcd_exit_clk_gating(hba);
 out_disable:
index 319539ad8159802ed3f736d4ee3f3c30e4304219..7c88ddd77eed771ed6cbb2dbf22197c5f0a8747c 100644 (file)
@@ -303,12 +303,9 @@ struct ufs_pwr_mode_info {
  * @pwr_change_notify: called before and after a power mode change
  *                     is carried out to allow vendor spesific capabilities
  *                     to be set.
- * @setup_xfer_req: called before any transfer request is issued
- *                  to set some things
- * @setup_task_mgmt: called before any task management request is issued
- *                  to set some things
+
  * @hibern8_notify: called around hibern8 enter/exit
- * @apply_dev_quirks: called to apply device specific quirks
+
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
  * @dbg_register_dump: used to dump controller debug information
@@ -321,9 +318,8 @@ struct ufs_hba_variant_ops {
        u32     (*get_ufs_hci_version)(struct ufs_hba *);
        int     (*clk_scale_notify)(struct ufs_hba *, bool,
                                    enum ufs_notify_change_status);
-       int     (*pre_setup_clocks)(struct ufs_hba *, bool);                                    
-       int     (*setup_clocks)(struct ufs_hba *, bool,
-                               enum ufs_notify_change_status);
+       int     (*pre_setup_clocks)(struct ufs_hba *, bool);
+       int     (*setup_clocks)(struct ufs_hba *, bool);
        int     (*setup_regulators)(struct ufs_hba *, bool);
        void    (*host_reset)(struct ufs_hba *);
        int     (*hce_enable_notify)(struct ufs_hba *,
@@ -338,7 +334,6 @@ struct ufs_hba_variant_ops {
                                        int, struct scsi_cmnd *);
        void    (*set_nexus_t_task_mgmt)(struct ufs_hba *, int, u8);
        void    (*hibern8_notify)(struct ufs_hba *, u8, bool);
-       int     (*apply_dev_quirks)(struct ufs_hba *);
        int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
        int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
        void    (*dbg_register_dump)(struct ufs_hba *hba);
@@ -429,6 +424,7 @@ struct ufs_clk_scaling {
  */
 struct ufs_init_prefetch {
        u32 icc_level;
+};
 
 /**
  * struct ufs_monitor - monitors ufs driver's behaviors
@@ -439,6 +435,10 @@ struct ufs_monitor {
 #define UFSHCD_MONITOR_LEVEL1  (1 << 0)
 #define UFSHCD_MONITOR_LEVEL2  (1 << 1)
 };
+
+struct ufs_secure_log {
+       unsigned long paddr;
+       u32 *vaddr;
 };
 
 #define UIC_ERR_REG_HIST_LENGTH 8
@@ -561,6 +561,7 @@ struct ufs_hba {
        struct device_attribute rpm_lvl_attr;
        struct device_attribute spm_lvl_attr;
        int pm_op_in_progress;
+       bool async_resume;
 
        struct ufshcd_lrb *lrb;
        volatile unsigned long lrb_in_use;
@@ -625,9 +626,11 @@ struct ufs_hba {
        #define UFSHCD_QUIRK_PRDT_BYTE_GRAN                     UFS_BIT(7)
 
        #define UFSHCD_QUIRK_USE_OF_HCE                         UFS_BIT(8)
+       #define UFSHCD_QUIRK_GET_UPMCRS_DIRECT                  UFS_BIT(9)
        #define UFSHCI_QUIRK_SKIP_INTR_AGGR                     UFS_BIT(10)
        #define UFSHCD_QUIRK_GET_GENERRCODE_DIRECT              UFS_BIT(11)
        #define UFSHCD_QUIRK_UNRESET_INTR_AGGR                  UFS_BIT(12)
+
        unsigned int quirks;    /* Deviations from standard UFSHCI spec. */
 
        /* Device deviations from standard UFS device spec. */
@@ -712,6 +715,9 @@ struct ufs_hba {
         */
 #define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
 
+       /* Allow only hibern8 without clk gating */
+#define UFSHCD_CAP_FAKE_CLK_GATING (1 << 6)
+
        struct devfreq *devfreq;
        struct ufs_clk_scaling clk_scaling;
        bool is_sys_suspended;
@@ -725,6 +731,7 @@ struct ufs_hba {
 
        struct rw_semaphore clk_scaling_lock;
        struct ufs_desc_size desc_size;
+       struct ufs_secure_log secure_log;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
@@ -955,12 +962,10 @@ static inline int ufshcd_vops_pre_setup_clocks(struct ufs_hba *hba, bool on)
        return 0;
 }
 
-
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
-                                       enum ufs_notify_change_status status)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
 {
        if (hba->vops && hba->vops->setup_clocks)
-               return hba->vops->setup_clocks(hba, on, status);
+               return hba->vops->setup_clocks(hba, on);
        return 0;
 }
 
@@ -1001,35 +1006,6 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
        return -ENOTSUPP;
 }
 
-static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
-                                       bool is_scsi_cmd)
-{
-       if (hba->vops && hba->vops->setup_xfer_req)
-               return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
-}
-
-static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
-                                       int tag, u8 tm_function)
-{
-       if (hba->vops && hba->vops->setup_task_mgmt)
-               return hba->vops->setup_task_mgmt(hba, tag, tm_function);
-}
-
-static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
-                                       enum uic_cmd_dme cmd,
-                                       enum ufs_notify_change_status status)
-{
-       if (hba->vops && hba->vops->hibern8_notify)
-               return hba->vops->hibern8_notify(hba, cmd, status);
-}
-
-static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
-{
-       if (hba->vops && hba->vops->apply_dev_quirks)
-               return hba->vops->apply_dev_quirks(hba);
-       return 0;
-}
-
 static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
 {
        if (hba->vops && hba->vops->suspend)