Saves 1392 bytes of .rodata strings.
Also change a few function/macro prototypes in i915_gem_gtt.c
from dev to dev_priv where it made more sense to do so.
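The conversion is mechanical: IS_BROXTON() and IS_BXT_REVID() now dereference
(dev_priv)->info directly, so call sites that only hold a struct drm_device
pointer either switch their local variable to dev_priv or wrap the argument
with to_i915(). A minimal sketch of the pattern (the helper name below is
hypothetical, for illustration only):

	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_BROXTON(dev_priv))
		bxt_example_setup(dev_priv);	/* hypothetical helper */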
v2: Add parentheses around dev_priv. (Ville Syrjala)
v3: Mention function prototype changes. (David Weinehall)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: David Weinehall <david.weinehall@linux.intel.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Jani Nikula <jani.nikula@linux.intel.com>
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: David Weinehall <david.weinehall@linux.intel.com>
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
-#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
+#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
#define BXT_REVID_B0 0x3
#define BXT_REVID_C0 0x9
-#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define IS_BXT_REVID(dev_priv, since, until) \
+ (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
#define KBL_REVID_A0 0x0
#define KBL_REVID_B0 0x1
/* We use the flushing unmap only with ppgtt structures:
* page directories, page tables and scratch pages.
*/
-static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
+static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
}
#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+#define kunmap_px(ppgtt, vaddr) \
+ kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
-#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
-#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
+#define fill32_px(dev_priv, px, v) \
+ fill_page_dma_32((dev_priv), px_base(px), (v))
-static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
- const uint64_t val)
+static void fill_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint64_t val)
{
int i;
uint64_t * const vaddr = kmap_page_dma(p);
for (i = 0; i < 512; i++)
vaddr[i] = val;
- kunmap_page_dma(dev, vaddr);
+ kunmap_page_dma(dev_priv, vaddr);
}
-static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
- const uint32_t val32)
+static void fill_page_dma_32(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint32_t val32)
{
uint64_t v = val32;
v = v << 32 | val32;
- fill_page_dma(dev, p, v);
+ fill_page_dma(dev_priv, p, v);
}
static int
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true);
- fill_px(vm->dev, pt, scratch_pte);
+ fill_px(to_i915(vm->dev), pt, scratch_pte);
}
static void gen6_initialize_pt(struct i915_address_space *vm,
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true, 0);
- fill32_px(vm->dev, pt, scratch_pte);
+ fill32_px(to_i915(vm->dev), pt, scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct drm_device *dev)
scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
- fill_px(vm->dev, pd, scratch_pde);
+ fill_px(to_i915(vm->dev), pd, scratch_pde);
}
static int __pdp_init(struct drm_device *dev,
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- fill_px(vm->dev, pdp, scratch_pdpe);
+ fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
}
static void gen8_initialize_pml4(struct i915_address_space *vm,
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);
- fill_px(vm->dev, pml4, scratch_pml4e);
+ fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
}
static void
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(ggtt->base.dev))
+ if (IS_BROXTON(to_i915(ggtt->base.dev)))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
ggtt->base.closed = false;
if (INTEL_INFO(dev)->gen >= 8) {
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
* configuration so that we use the proper lane count for our
* calculations.
*/
- if (IS_BROXTON(dev) && port == PORT_A) {
+ if (IS_BROXTON(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_device *dev,
+static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
{
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
- if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
+ if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
chv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit, &clock))
+ if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
continue;
if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
max_cdclk = 308571;
dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->max_cdclk_freq = 624000;
} else if (IS_BROADWELL(dev_priv)) {
/*
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
"pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
if (intel_crt_present(dev))
intel_crt_init(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
struct intel_encoder *encoder;
if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- !IS_BROXTON(dev)))
+ !IS_BROXTON(dev_priv)))
return;
/*
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
intel_dp->pps_reset = true;
else
intel_dp->pps_pipe = INVALID_PIPE;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- IS_BROXTON(dev) ?
+ IS_BROXTON(dev_priv) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dpll_mgr = &skl_pll_mgr;
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
dpll_mgr = &bxt_pll_mgr;
else if (HAS_DDI(dev_priv))
dpll_mgr = &hsw_pll_mgr;
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_dsi_device_ready(encoder);
}
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
+ i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
DRM_DEBUG_KMS("\n");
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
+ if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
- if (IS_BROXTON(encoder->base.dev))
+ if (IS_BROXTON(to_i915(encoder->base.dev)))
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
return bxt_compute_dsi_pll(encoder, config);
return -ENODEV;
void intel_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_disable_dsi_pll(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_disable_dsi_pll(encoder);
}
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
}
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
/* WaDisableMinuteIaClockGating:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
/* WaC6DisallowByGfxPause:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_GUC_UCODE;
guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
- if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+ if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
- if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+ if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
return MODE_OK;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
skl_display_core_init(dev_priv, resume);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
} else if (IS_CHERRYVIEW(dev)) {
mutex_lock(&power_domains->lock);