Commit 4efd3460 authored by Linus Torvalds

Merge tag 'drm-fixes-2018-11-16' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Live from Vancouver, SoC maintainer talk, this weeks drm fixes pull
  for rc3:

  omapdrm:
   - regression fixes for the reordering bridge stuff that went into rc1

  i915:
   - incorrect EU count fix
   - HPD storm fix
   - MST fix
   - relocation fix for gen4/5

  amdgpu:
   - huge page handling fix
   - IH ring setup
   - XGMI aperture setup
   - watermark setup fix

  misc:
   - docs and MST fix"

* tag 'drm-fixes-2018-11-16' of git://anongit.freedesktop.org/drm/drm: (23 commits)
  drm/i915: Account for scale factor when calculating initial phase
  drm/i915: Clean up skl_program_scaler()
  drm/i915: Move programming plane scaler to its own function.
  drm/i915/icl: Drop spurious register read from icl_dbuf_slices_update
  drm/i915: fix broadwell EU computation
  drm/amdgpu: fix huge page handling on Vega10
  drm/amd/pp: Fix truncated clock value when set watermark
  drm/amdgpu: fix bug with IH ring setup
  drm/meson: venc: dmt mode must use encp
  drm/amdgpu: set system aperture to cover whole FB region
  drm/i915: Fix hpd handling for pins with two encoders
  drm/i915/execlists: Force write serialisation into context image vs execution
  drm/i915/icl: Fix power well 2 wrt. DC-off toggling order
  drm/i915: Fix NULL deref when re-enabling HPD IRQs on systems with MST
  drm/i915: Fix possible race in intel_dp_add_mst_connector()
  drm/i915/ringbuffer: Delay after EMIT_INVALIDATE for gen4/gen5
  drm/omap: dsi: Fix missing of_platform_depopulate()
  drm/omap: Move DISPC runtime PM handling to omapdrm
  drm/omap: dsi: Ensure the device is active during probe
  drm/omap: hdmi4: Ensure the device is active during bind
  ...
parents ef268de1 20325e8a
@@ -209,11 +209,61 @@ static int __init omapdss_init_fbdev(void)
return 0;
}
#else
static inline int omapdss_init_fbdev(void)
static const char * const omapdss_compat_names[] __initconst = {
"ti,omap2-dss",
"ti,omap3-dss",
"ti,omap4-dss",
"ti,omap5-dss",
"ti,dra7-dss",
};
static struct device_node * __init omapdss_find_dss_of_node(void)
{
return 0;
struct device_node *node;
int i;
for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
node = of_find_compatible_node(NULL, NULL,
omapdss_compat_names[i]);
if (node)
return node;
}
return NULL;
}
static int __init omapdss_init_of(void)
{
int r;
struct device_node *node;
struct platform_device *pdev;
/* only create dss helper devices if dss is enabled in the .dts */
node = omapdss_find_dss_of_node();
if (!node)
return 0;
if (!of_device_is_available(node))
return 0;
pdev = of_find_device_by_node(node);
if (!pdev) {
pr_err("Unable to find DSS platform device\n");
return -ENODEV;
}
r = of_platform_populate(node, NULL, NULL, &pdev->dev);
if (r) {
pr_err("Unable to populate DSS submodule devices\n");
return r;
}
return omapdss_init_fbdev();
}
omap_device_initcall(omapdss_init_of);
#endif /* CONFIG_FB_OMAP2 */
static void dispc_disable_outputs(void)
@@ -361,58 +411,3 @@ int omap_dss_reset(struct omap_hwmod *oh)
return r;
}
static const char * const omapdss_compat_names[] __initconst = {
"ti,omap2-dss",
"ti,omap3-dss",
"ti,omap4-dss",
"ti,omap5-dss",
"ti,dra7-dss",
};
static struct device_node * __init omapdss_find_dss_of_node(void)
{
struct device_node *node;
int i;
for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
node = of_find_compatible_node(NULL, NULL,
omapdss_compat_names[i]);
if (node)
return node;
}
return NULL;
}
static int __init omapdss_init_of(void)
{
int r;
struct device_node *node;
struct platform_device *pdev;
/* only create dss helper devices if dss is enabled in the .dts */
node = omapdss_find_dss_of_node();
if (!node)
return 0;
if (!of_device_is_available(node))
return 0;
pdev = of_find_device_by_node(node);
if (!pdev) {
pr_err("Unable to find DSS platform device\n");
return -ENODEV;
}
r = of_platform_populate(node, NULL, NULL, &pdev->dev);
if (r) {
pr_err("Unable to populate DSS submodule devices\n");
return r;
}
return omapdss_init_fbdev();
}
omap_device_initcall(omapdss_init_of);
@@ -1632,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
continue;
}
/* First check if the entry is already handled */
if (cursor.pfn < frag_start) {
cursor.entry->huge = true;
amdgpu_vm_pt_next(adev, &cursor);
continue;
}
/* If it isn't already handled it can't be a huge page */
if (cursor.entry->huge) {
/* Add the entry to the relocated list to update it. */
@@ -1701,8 +1694,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
}
} while (frag_start < entry_end);
if (frag >= shift)
if (amdgpu_vm_pt_descendant(adev, &cursor)) {
/* Mark all child entries as huge */
while (cursor.pfn < frag_start) {
cursor.entry->huge = true;
amdgpu_vm_pt_next(adev, &cursor);
}
} else if (frag >= shift) {
/* or just move on to the next on the same level. */
amdgpu_vm_pt_next(adev, &cursor);
}
}
return 0;
......
@@ -72,7 +72,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
/*
@@ -82,11 +82,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max((adev->gmc.vram_end >> 18) + 0x1,
max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
......
@@ -90,7 +90,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
/*
@@ -100,11 +100,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max((adev->gmc.vram_end >> 18) + 0x1,
max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
......
@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
else
wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
......
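The IH ring fix above only widens the mask applied to the write-back address: IH_RB_WPTR_ADDR_HI holds more than 8 address bits, so masking the upper half of the address with 0xFF can silently drop bits. A standalone C sketch of the difference, assuming a hypothetical write-back address with bit 40 set (the address value and the upper_32_bits() helper are illustrative stand-ins, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* mirrors the kernel's upper_32_bits() helper */
static uint32_t upper_32_bits(uint64_t n)
{
	return (uint32_t)(n >> 32);
}

int main(void)
{
	/* hypothetical write-back address with address bit 40 set */
	uint64_t wptr_off = 0x0000012345678000ULL;

	printf("old mask: 0x%02x\n", upper_32_bits(wptr_off) & 0xFF);   /* 0x23: bits 40-47 lost */
	printf("new mask: 0x%04x\n", upper_32_bits(wptr_off) & 0xFFFF); /* 0x0123 */
	return 0;
}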
@@ -713,20 +713,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
table->WatermarkRow[1][i].MinClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
1000);
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
1000));
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
1000);
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
1000));
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
1000);
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1000));
table->WatermarkRow[1][i].MaxUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
1000);
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1000));
table->WatermarkRow[1][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
}
@@ -734,20 +734,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
table->WatermarkRow[0][i].MinClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
1000);
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
1000));
table->WatermarkRow[0][i].MaxClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
1000);
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
1000));
table->WatermarkRow[0][i].MinUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
1000);
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1000));
table->WatermarkRow[0][i].MaxUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
1000);
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1000));
table->WatermarkRow[0][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
......
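The smu_set_watermarks_for_clocks_ranges() change above is purely about where the 16-bit cast lands: the clock ranges arrive in kHz, which does not fit in a u16, so casting before the division by 1000 truncates the value. A minimal user-space sketch of the two orderings (the 600 MHz figure is illustrative; cpu_to_le16() is left out since it only affects byte order):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clk_in_khz = 600000;	/* 600 MHz, illustrative */

	/* old order: cast first, 600000 wraps to 10176, then / 1000 -> 10 */
	uint16_t truncated = (uint16_t)(clk_in_khz) / 1000;

	/* fixed order: divide to MHz first, 600 fits in 16 bits */
	uint16_t correct = (uint16_t)(clk_in_khz / 1000);

	printf("truncated=%u correct=%u\n", truncated, correct);
	return 0;
}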
@@ -1275,6 +1275,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (!mstb)
goto out;
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = (rad[i / 2] >> shift) & 0xf;
......
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
* drm_driver_legacy_fb_format - compute drm fourcc code from legacy description
* @dev: DRM device
* @bpp: bits per pixel
* @depth: bit depth per pixel
* @native: use host native byte order
*
* Computes a drm fourcc pixel format code for the given @bpp/@depth values.
* Unlike drm_mode_legacy_fb_format() this looks at the driver's mode_config,
......
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
u8 eu_disabled_mask;
u32 n_disabled;
if (!(sseu->subslice_mask[ss] & BIT(ss)))
if (!(sseu->subslice_mask[s] & BIT(ss)))
/* skip disabled subslice */
continue;
......
@@ -4850,8 +4850,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
* chroma samples for both of the luma samples, and thus we don't
* actually get the expected MPEG2 chroma siting convention :(
* The same behaviour is observed on pre-SKL platforms as well.
*
* Theory behind the formula (note that we ignore sub-pixel
* source coordinates):
* s = source sample position
* d = destination sample position
*
* Downscaling 4:1:
* -0.5
* | 0.0
* | | 1.5 (initial phase)
* | | |
* v v v
* | s | s | s | s |
* | d |
*
* Upscaling 1:4:
* -0.5
* | -0.375 (initial phase)
* | | 0.0
* | | |
* v v v
* | s |
* | d | d | d | d |
*/
u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
int phase = -0x8000;
u16 trip = 0;
@@ -4859,6 +4882,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
if (chroma_cosited)
phase += (sub - 1) * 0x8000 / sub;
phase += scale / (2 * sub);
/*
* Hardware initial phase limited to [-0.5:1.5].
* Since the max hardware scale factor is 3.0, we
* should never actually exceed 1.0 here.
*/
WARN_ON(phase < -0x8000 || phase > 0x18000);
if (phase < 0)
phase = 0x10000 + phase;
else
@@ -5067,13 +5099,20 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
if (crtc->config->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
int pfit_w, pfit_h, hscale, vscale;
int id;
if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
uv_rgb_hphase = skl_scaler_calc_phase(1, false);
uv_rgb_vphase = skl_scaler_calc_phase(1, false);
pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
......
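The diagrams in the comment above fall straight out of the new formula: phase = -0.5 + scale / (2 * sub) in .16 fixed point (plus the cosited-chroma term, which is zero for sub = 1). A standalone C sketch reproducing the two annotated cases; the sign wrap and trip-bit packing done by skl_scaler_calc_phase() are omitted:

#include <stdio.h>

/* initial phase in .16 fixed point, non-cosited, as in the hunk above */
static int initial_phase(int scale, int sub)
{
	int phase = -0x8000;		/* -0.5 */

	phase += scale / (2 * sub);	/* + scale / 2 for sub == 1 */
	return phase;
}

int main(void)
{
	/* 4:1 downscale, scale = 4.0 -> +1.500, matching the first diagram */
	printf("%+.3f\n", initial_phase(4 << 16, 1) / 65536.0);
	/* 1:4 upscale, scale = 0.25 -> -0.375, matching the second diagram */
	printf("%+.3f\n", initial_phase(1 << 14, 1) / 65536.0);
	return 0;
}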
@@ -452,6 +452,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (!intel_connector)
return NULL;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
@@ -462,10 +466,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
for_each_pipe(dev_priv, pipe) {
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
......
@@ -1646,7 +1646,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
u16 skl_scaler_calc_phase(int sub, bool chroma_center);
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(const struct intel_crtc_state *crtc_state,
u32 pixel_format);
......
@@ -228,7 +228,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_connector *intel_connector = to_intel_connector(connector);
if (intel_connector->encoder->hpd_pin == pin) {
/* Don't check MST ports, they don't have pins */
if (!intel_connector->mst_port &&
intel_connector->encoder->hpd_pin == pin) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
connector->name);
@@ -395,37 +397,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
struct intel_encoder *encoder;
bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
u32 long_hpd_pulse_mask = 0;
u32 short_hpd_pulse_mask = 0;
enum hpd_pin pin;
if (!pin_mask)
return;
spin_lock(&dev_priv->irq_lock);
/*
* Determine whether ->hpd_pulse() exists for each pin, and
* whether we have a short or a long pulse. This is needed
* as each pin may have up to two encoders (HDMI and DP) and
* only one of them (DP) will have ->hpd_pulse().
*/
for_each_intel_encoder(&dev_priv->drm, encoder) {
enum hpd_pin pin = encoder->hpd_pin;
bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
enum port port = encoder->port;
bool long_hpd;
pin = encoder->hpd_pin;
if (!(BIT(pin) & pin_mask))
continue;
if (has_hpd_pulse) {
bool long_hpd = long_mask & BIT(pin);
enum port port = encoder->port;
if (!has_hpd_pulse)
continue;
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
long_hpd ? "long" : "short");
/*
* For long HPD pulses we want to have the digital queue happen,
* but we still want HPD storm detection to function.
*/
queue_dig = true;
if (long_hpd) {
dev_priv->hotplug.long_port_mask |= (1 << port);
} else {
/* for short HPD just trigger the digital queue */
dev_priv->hotplug.short_port_mask |= (1 << port);
continue;
}
long_hpd = long_mask & BIT(pin);
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
long_hpd ? "long" : "short");
queue_dig = true;
if (long_hpd) {
long_hpd_pulse_mask |= BIT(pin);
dev_priv->hotplug.long_port_mask |= BIT(port);
} else {
short_hpd_pulse_mask |= BIT(pin);
dev_priv->hotplug.short_port_mask |= BIT(port);
}
}
/* Now process each pin just once */
for_each_hpd_pin(pin) {
bool long_hpd;
if (!(BIT(pin) & pin_mask))
continue;
if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
/*
@@ -442,11 +461,22 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
continue;
if (!has_hpd_pulse) {
/*
* Delegate to ->hpd_pulse() if one of the encoders for this
* pin has it, otherwise let the hotplug_work deal with this
* pin directly.
*/
if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
long_hpd = long_hpd_pulse_mask & BIT(pin);
} else {
dev_priv->hotplug.event_bits |= BIT(pin);
long_hpd = true;
queue_hp = true;
}
if (!long_hpd)
continue;
if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
dev_priv->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
......
@@ -424,7 +424,8 @@ static u64 execlists_update_context(struct i915_request *rq)
reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
/* True 32b PPGTT with dynamic page allocation: update PDP
/*
* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
@@ -432,6 +433,17 @@ static u64 execlists_update_context(struct i915_request *rq)
if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
/*
* Make sure the context image is complete before we submit it to HW.
*
* Ostensibly, writes (including the WCB) should be flushed prior to
* an uncached write such as our mmio register access, but the empirical
* evidence (esp. on Braswell) suggests that the WC write into memory
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
*/
wmb();
return ce->lrc_desc;
}
......
@@ -91,6 +91,7 @@ static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
int i;
/*
* read/write caches:
@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
cmd |= MI_INVALIDATE_ISP;
}
cs = intel_ring_begin(rq, 2);
i = 2;
if (mode & EMIT_INVALIDATE)
i += 20;
cs = intel_ring_begin(rq, i);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
*cs++ = MI_NOOP;
/*
* A random delay to let the CS invalidate take effect? Without this
* delay, the GPU relocation path fails as the CS does not see
* the updated contents. Just as important, if we apply the flushes
* to the EMIT_FLUSH branch (i.e. immediately after the relocation
* write and before the invalidate on the next batch), the relocations
* still fail. This implies that it is a delay following invalidation
* that is required to reset the caches as opposed to a delay to
* ensure the memory is written.
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
*cs++ = i915_ggtt_offset(rq->engine->scratch) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
for (i = 0; i < 12; i++)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
*cs++ = i915_ggtt_offset(rq->engine->scratch) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
*cs++ = cmd;
intel_ring_advance(rq, cs);
return 0;
......
@@ -2748,6 +2748,12 @@ static const struct i915_power_well_desc icl_power_wells[] = {
.hsw.has_fuses = true,
},
},
{
.name = "DC off",
.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,